source
stringlengths
3
92
c
stringlengths
26
2.25M
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(8*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(4*t1+Nx+5,64)),floord(8*t2+Nx+4,64)),floord(8*t3+Nx+4,64)),floord(8*t1-8*t2+Nz+Nx+3,64));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),64*t4+62),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ 
gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
TriangularMesh3D.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #ifndef BK_TRIANGULARMESH3D_H #define BK_TRIANGULARMESH3D_H #include <bkDataset/mesh/TriangularMeshBase.h> #include <bkDataset/mesh/TriangularMesh.h> #include <bkDataset/attributes/attribute_info.h> #include <iostream> // todo remove namespace bk { template<> class TriangularMesh<3> : public details::TriangularMeshBase<3> { //==================================================================================================== //===== DEFINITIONS //==================================================================================================== using self_type = TriangularMesh<3>; using base_type = details::TriangularMeshBase<3>; public: using point_type = typename base_type::point_type; using cell_type = typename base_type::cell_type; //==================================================================================================== //===== CONSTRUCTORS & DESTRUCTOR //==================================================================================================== public: /// @{ -------------------------------------------------- CTOR TriangularMesh() = default; TriangularMesh(const self_type&) = default; TriangularMesh(self_type&&) noexcept = default; /// @} /// @{ -------------------------------------------------- DTOR ~TriangularMesh() = default; /// @} //==================================================================================================== //===== GETTER //==================================================================================================== /// @{ -------------------------------------------------- HAS NORMALS [[nodiscard]] bool has_point_normals() const { return this->point_attribute_map().has_attribute(attribute_info::normal3d()); } [[nodiscard]] bool has_triangle_normals() const { return this->cell_attribute_map().has_attribute(attribute_info::normal3d()); } [[nodiscard]] bool has_normals() const { return has_point_normals() && has_triangle_normals(); } /// @} /// @{ 
-------------------------------------------------- GET POINT NORMALS Vec3d& normal_of_point(unsigned int pointId) { return normals_of_points()[pointId]; } const Vec3d& normal_of_point(unsigned int pointId) const { return normals_of_points()[pointId]; } private: std::vector<attribute_info::normal3d_value_type>& normals_of_points() { assert(has_point_normals() && "call calc_normals() first"); return this->point_attribute_vector<attribute_info::normal3d()>(); } public: const std::vector<attribute_info::normal3d_value_type>& normals_of_points() const { assert(has_point_normals() && "call calc_normals() first"); return this->point_attribute_vector<attribute_info::normal3d()>(); } /// @} /// @{ -------------------------------------------------- GET CELL NORMALS Vec3d& normal_of_triangle(unsigned int cellId) { return normals_of_triangles()[cellId]; } const Vec3d& normal_of_triangle(unsigned int cellId) const { return normals_of_triangles()[cellId]; } private: std::vector<attribute_info::normal3d_value_type>& normals_of_triangles() { assert(has_triangle_normals() && "call calc_normals() first"); return this->cell_attribute_vector<attribute_info::normal3d()>(); } public: const std::vector<attribute_info::normal3d_value_type>& normals_of_triangles() const { assert(has_triangle_normals() && "call calc_normals() first"); return this->cell_attribute_vector<attribute_info::normal3d()>(); } /// @} //==================================================================================================== //===== SETTER //==================================================================================================== /// @{ -------------------------------------------------- OPERATOR = [[maybe_unused]] auto operator=(const self_type&) -> self_type& = default; [[maybe_unused]] auto operator=(self_type&&) noexcept -> self_type& = default; /// @} //==================================================================================================== //===== FUNCTIONS 
//==================================================================================================== /// @{ -------------------------------------------------- INIT void init() { base_type::init(); calc_normals(); } /// @} /// @{ -------------------------------------------------- HELPERS: INITIALIZATION protected: void calc_normals_per_triangle() { std::vector<attribute_info::normal3d_value_type>& cell_normals = this->add_cell_attribute_vector<attribute_info::normal3d()>(); #pragma omp parallel for for (unsigned int cellId = 0; cellId < this->topology().num_cells(); ++cellId) { const Cell<3>& c = this->topology().cell(cellId); const Vec3d diff0 = this->geometry().point(c[1]) - this->geometry().point(c[0]); const Vec3d diff1 = this->geometry().point(c[2]) - this->geometry().point(c[0]); cell_normals[cellId] = (diff0).cross(diff1).normalize(); } } void calc_normals_per_point() { const std::vector<attribute_info::normal3d_value_type>& cell_normals = this->cell_attribute_vector<attribute_info::normal3d()>(); std::vector<attribute_info::normal3d_value_type>& point_normals = this->add_point_attribute_vector<attribute_info::normal3d()>(); if (!cell_normals.empty()) { #pragma omp parallel for for (unsigned int pointId = 0; pointId < this->geometry().num_points(); ++pointId) { const std::vector<unsigned int>& triangles = this->topology().cells_of_point(pointId); assert(!triangles.empty() && "this point does not belong to any triangle"); point_normals[pointId].set_zero(); for (unsigned int trianglePointId = 0; trianglePointId < triangles.size(); ++trianglePointId) { point_normals[pointId] += cell_normals[triangles[trianglePointId]]; } point_normals[pointId] /= triangles.size(); //point_normals[pointId].normalize_internal(); // not necessary because the cell normals are already normalized } } } //! 
heuristic: majority of normals points away from center of points [[maybe_unused]] bool consistent_normal_orientation() { std::vector<attribute_info::normal3d_value_type>& point_normals = normals_of_points(); std::vector<attribute_info::normal3d_value_type>& cell_normals = normals_of_triangles(); if (point_normals.empty() && cell_normals.empty()) { std::cerr << "TriangularMesh::consistent_normal_orientation() - aborting! mesh has neither point nor cell normals!" << std::endl; return false; } const auto meshCenter = this->geometry().center(); if (!point_normals.empty()) { const unsigned int numPoints = this->geometry().num_points(); unsigned int cntAway = 0; unsigned int cntTowards = 0; #pragma omp parallel for reduction(+:cntAway) reduction(+:cntTowards) for (unsigned int pointId = 0; pointId < numPoints; ++pointId) { Vec3d dirToCenter = meshCenter - this->geometry().point(pointId); const double n = dirToCenter.norm(); if (bk::equals_approx(n, 0)) { continue; } dirToCenter /= n; if (dirToCenter.dot(point_normals[pointId]) < 0) { ++cntAway; } else { ++cntTowards; } } if (cntTowards > cntAway) { // more normals point towards the center than away from it -> flip normals for (unsigned int pointId = 0; pointId < numPoints; ++pointId) { point_normals[pointId].negate_internal(); } } } if (!cell_normals.empty()) { const unsigned int numCells = this->topology().num_cells(); unsigned int cntAway = 0; unsigned int cntTowards = 0; #pragma omp parallel for reduction(+:cntAway) reduction(+:cntTowards) for (unsigned int cellId = 0; cellId < numCells; ++cellId) { const Cell<3>& triangle = this->topology().cell(cellId); const Vec3d triangleCenter = (this->geometry().point(triangle[0]) + this->geometry().point(triangle[1]) + this->geometry().point(triangle[2])) / 3; Vec3d dirToCenter = meshCenter - triangleCenter; const double n = dirToCenter.norm(); if (bk::equals_approx(n, 0)) { continue; } dirToCenter /= n; if (dirToCenter.dot(cell_normals[cellId]) < 0) { ++cntAway; } else { 
++cntTowards; } } // more normals point towards the center than away from it -> flip normals if (cntTowards > cntAway) { for (unsigned int cellId = 0; cellId < numCells; ++cellId) { cell_normals[cellId].negate_internal(); } } } return true; } public: /// @} /// @{ -------------------------------------------------- CALC NORMALS void calc_normals() { calc_normals_per_triangle(); calc_normals_per_point(); consistent_normal_orientation(); } /// @} /// @{ -------------------------------------------------- IS INSIDE [[nodiscard]] bool is_inside(const point_type& queryPoint) const { const bk::KDPointInfo<point_type> closestPoint = this->geometry().closest_point(queryPoint); const attribute_info::normal3d_value_type& nrml = normal_of_point(closestPoint.point_id); const Vec3d dirToPoint = closestPoint.point - queryPoint; return nrml.dot(dirToPoint) >= 0; } /// @} /// @{ -------------------------------------------------- DISTANCE TO SURFACE [[nodiscard]] double distance_to_surface(const point_type& queryPoint) const { const bk::KDPointInfo<point_type> closestPoint = this->geometry().closest_point(queryPoint); const std::vector<unsigned int>& closestTriangles = this->topology().cells_of_point(closestPoint.point_id); if (closestTriangles.empty()) { return closestPoint.distance_to_query; } bool foundProjection = false; double minSqDist = std::numeric_limits<double>::max(); Vec3d minDistProjection(0, 0, 0); //const std::vector<Vec3d>& point_normals = normals_of_points(); const std::vector<attribute_info::normal3d_value_type>& cell_normals = normals_of_triangles(); for (unsigned int t = 0; t < closestTriangles.size(); ++t) { const Cell<3> trianglePointsIds = this->topology().cell(closestTriangles[t]); const point_type& point0 = this->geometry().point(trianglePointsIds[0]); const point_type& point1 = this->geometry().point(trianglePointsIds[1]); const point_type& point2 = this->geometry().point(trianglePointsIds[2]); //const Vec3d& normal0 = point_normals[trianglePointsIds[0]]; 
//const Vec3d& normal1 = point_normals[trianglePointsIds[1]]; //const Vec3d& normal2 = point_normals[trianglePointsIds[2]]; //const Vec3d cellNormal(((*normal0) + (*normal1) + (*normal2)) / 3.0); const attribute_info::normal3d_value_type& cellNormal = cell_normals[closestTriangles[t]]; /* * ray-triangle intersection by haines & moeller */ // B - A const Vec3d u(point1 - point0); // C - A const Vec3d v(point2 - point0); // P - A const Vec3d w(queryPoint - point0); const Vec3d dvCross(cellNormal.cross(v)); const Vec3d wuCross(w.cross(u)); const double dvCrossUDot = dvCross.dot(u); const double dvCrossWDot = dvCross.dot(w); const double wuCrossDDot = wuCross.dot(cellNormal); const double norm = 1.0 / dvCrossUDot; const double res_r = norm * dvCrossWDot; const double res_s = norm * wuCrossDDot; // projected point in triangle? double sqDist = std::numeric_limits<double>::max(); if (res_r >= 0.0 && res_s >= 0.0 && res_r + res_s <= 1.0) { const Vec3d projection(point0 + res_r * u + res_s * v); sqDist = (projection - queryPoint).norm_squared(); if (sqDist < minSqDist) { minSqDist = sqDist; minDistProjection = projection; foundProjection = true; } } } // for triangles of closest point return foundProjection ? 
queryPoint.distance(minDistProjection) : closestPoint.distance_to_query; } /// @} //==================================================================================================== //===== I/O //==================================================================================================== /// @{ -------------------------------------------------- SAVE [[maybe_unused]] bool save_matlab(std::string_view filename) const { std::string fname(filename); const std::string matlab_suffix = ".m"; if (fname.empty()) { fname = "trimesh_matlab" + matlab_suffix; } else if (fname.compare(fname.length() - 2, 2, matlab_suffix) != 0) { fname.append(matlab_suffix); } std::ofstream file(fname, std::ios_base::out); if (!file.good()) { return false; } file.precision(4); file << std::fixed; std::stringstream ss_x; ss_x.precision(4); ss_x << std::fixed; std::stringstream ss_y; ss_y.precision(4); ss_y << std::fixed; std::stringstream ss_z; ss_z.precision(4); ss_z << std::fixed; const unsigned int numVert = this->geometry().num_points(); for (unsigned int i = 0; i < numVert; ++i) { const point_type& v = this->geometry().point(i); ss_x << v[0]; ss_y << v[1]; ss_z << v[2]; if (i != numVert - 1) { ss_x << " "; ss_y << " "; ss_z << " "; } } std::stringstream ss_tri; ss_tri.precision(4); ss_tri << std::fixed; const unsigned int numTri = this->topology().num_cells(); for (unsigned int i = 0; i < numTri; ++i) { const Cell<3>& c = this->topology().cell(i); ss_tri << (c[0] + 1) << " " << (c[1] + 1) << " " << (c[2] + 1); if (i != numTri - 1) { ss_tri << "; "; } } file << "clc; clear all; close all;" << std::endl; file << std::endl; file << "x = [" << ss_x.str() << "];" << std::endl; file << "y = [" << ss_y.str() << "];" << std::endl; file << "z = [" << ss_z.str() << "];" << std::endl; file << "tri = [" << ss_tri.str() << "];" << std::endl; file << std::endl; file << "figure('Renderer', 'OpenGL');" << std::endl; file << "hold on;" << std::endl; file << "colormap([0.5 0.5 0.5]);" << std::endl; 
file << "trisurf(tri, x, y, z);" << std::endl; file << "axis tight equal off;" << std::endl; file << "shading interp;" << std::endl; file << "light('Position', [0 0 -50]);" << std::endl; file << "light('Position', [0 0 50]);" << std::endl; file << "lighting phong;" << std::endl; //file << "view(0, -90)" << std::endl; file << "hold off;" << std::endl; file.close(); return true; } [[maybe_unused]] bool save_obj(std::string_view filename) const { std::string fname(filename); const std::string obj_suffix = ".obj"; if (fname.empty()) { fname = "trimesh" + obj_suffix; } else if (fname.compare(fname.length() - 2, 2, obj_suffix) != 0) { fname.append(obj_suffix); } std::ofstream file(fname, std::ios_base::out); if (!file.good()) { return false; } file.precision(8); file << std::fixed; // object name file << "o TriMesh" << std::endl; // vertices const unsigned int numVert = this->geometry().num_points(); for (unsigned int i = 0; i < numVert; ++i) { const point_type& v = this->geometry().point(i); file << "v "; file << v[0] << " "; file << v[1] << " "; file << v[2] << std::endl; } // normals const std::vector<attribute_info::normal3d_value_type>& pointNormals = normals_of_points(); for (unsigned int i = 0; i < numVert; ++i) { const attribute_info::normal3d_value_type& n = pointNormals[i]; file << "vn "; file << n[0] << " "; file << n[1] << " "; file << n[2] << std::endl; } // triangles for (unsigned int i = 0; i < this->topology().num_cells(); ++i) { const Cell<3>& c = this->topology().cell(i); file << "f "; file << (c[0] + 1) << " "; file << (c[1] + 1) << " "; file << (c[2] + 1) << std::endl; } file.close(); return true; } [[maybe_unused]] bool save_stl(std::string_view filename) const { static_assert(sizeof(float) == 4, "BINARY STL CAN ONLY BE SAVED IF FLOAT IS IMPLEMENTED AS 32 BIT TYPE"); std::string fname(filename); const std::string stl_suffix = ".stl"; if (fname.empty()) { fname = "trimesh" + stl_suffix; } else if (fname.compare(fname.length() - 2, 2, stl_suffix) != 0) 
{ fname.append(stl_suffix); } std::ofstream file(fname, std::ios_base::out | std::ios_base::binary); if (!file.good()) { return false; } //file << "solid TriMesh" << std::endl; std::uint8_t header[80]; file.write(reinterpret_cast<char*>(header), 80 * sizeof(std::uint8_t)); std::uint32_t numTri = static_cast<std::uint32_t>(this->topology().num_cells()); file.write(reinterpret_cast<char*>(&numTri), sizeof(std::uint32_t)); float temp; std::uint16_t attribute_byte_count = 0; const std::vector<attribute_info::normal3d_value_type>& cellNormals = normals_of_triangles(); for (unsigned int i = 0; i < numTri; ++i) { const Cell<3>& tri = this->topology().cell(i); // triangle normal const attribute_info::normal3d_value_type& trinormal = cellNormals[i]; for (int n = 0; n < 3; ++n) { temp = static_cast<float>(trinormal[n]); file.write(reinterpret_cast<char*>(&temp), sizeof(float)); } // 3x vertex for (int k = 0; k < 3; ++k) { const point_type& vert = this->geometry().point(tri[k]); for (int n = 0; n < 3; ++n) { temp = static_cast<float>(vert[n]); file.write(reinterpret_cast<char*>(&temp), sizeof(float)); } } // attribute byte count (0 in most applications) file.write(reinterpret_cast<char*>(&attribute_byte_count), sizeof(std::uint16_t)); } file.close(); return true; } /// @} /// @{ -------------------------------------------------- LOAD [[maybe_unused]] bool load(std::string_view filename) { const bool success = base_type::load(filename); this->init(); // calc normals return success; } /// @} }; // class TriangularMesh } // namespace bk #endif //BK_TRIANGULARMESH3D_H
_phonopy.c
/* Copyright (C) 2011 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <float.h> #include <math.h> #include <numpy/arrayobject.h> #include <stddef.h> #include <stdio.h> #include "phonopy.h" /* PHPYCONST is defined in dynmat.h */ /* Build dynamical matrix */ static PyObject* py_transform_dynmat_to_fc(PyObject* self, PyObject* args); static PyObject* py_perm_trans_symmetrize_fc(PyObject* self, PyObject* args); static PyObject* py_perm_trans_symmetrize_compact_fc(PyObject* self, PyObject* args); static PyObject* py_transpose_compact_fc(PyObject* self, PyObject* args); static PyObject* py_get_dynamical_matrix(PyObject* self, PyObject* args); static PyObject* py_get_nac_dynamical_matrix(PyObject* self, PyObject* args); static PyObject* py_get_recip_dipole_dipole(PyObject* self, PyObject* args); static PyObject* py_get_recip_dipole_dipole_q0(PyObject* self, PyObject* args); static PyObject* py_get_derivative_dynmat(PyObject* self, PyObject* args); static PyObject* py_get_thermal_properties(PyObject* self, PyObject* args); static PyObject* py_distribute_fc2(PyObject* self, PyObject* args); static PyObject* py_compute_permutation(PyObject* self, PyObject* args); static PyObject* py_gsv_set_smallest_vectors_sparse(PyObject* self, PyObject* args); static PyObject* py_gsv_set_smallest_vectors_dense(PyObject* self, PyObject* args); static PyObject* py_thm_relative_grid_address(PyObject* self, PyObject* args); static PyObject* py_thm_all_relative_grid_address(PyObject* self, PyObject* args); static PyObject* py_thm_integration_weight(PyObject* self, PyObject* args); static PyObject* py_thm_integration_weight_at_omegas(PyObject* self, PyObject* args); static PyObject* py_get_tetrahedra_frequenies(PyObject* self, PyObject* args); static PyObject* py_tetrahedron_method_dos(PyObject* self, PyObject* args); struct module_state { PyObject* error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif static 
PyObject* error_out(PyObject* m) { struct module_state* st = GETSTATE(m); PyErr_SetString(st->error, "something bad happened"); return NULL; } static PyMethodDef _phonopy_methods[] = { {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL}, {"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS, "Transform a set of dynmat to force constants"}, {"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS, "Enforce permutation and translational symmetry of force constants"}, {"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc, METH_VARARGS, "Enforce permutation and translational symmetry of compact force " "constants"}, {"transpose_compact_fc", py_transpose_compact_fc, METH_VARARGS, "Transpose compact force constants"}, {"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS, "Dynamical matrix"}, {"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS, "NAC dynamical matrix"}, {"recip_dipole_dipole", py_get_recip_dipole_dipole, METH_VARARGS, "Reciprocal part of dipole-dipole interaction"}, {"recip_dipole_dipole_q0", py_get_recip_dipole_dipole_q0, METH_VARARGS, "q=0 terms of reciprocal part of dipole-dipole interaction"}, {"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS, "Q derivative of dynamical matrix"}, {"thermal_properties", py_get_thermal_properties, METH_VARARGS, "Thermal properties"}, {"distribute_fc2", py_distribute_fc2, METH_VARARGS, "Distribute force constants for all atoms in atom_list using precomputed " "symmetry mappings."}, {"compute_permutation", py_compute_permutation, METH_VARARGS, "Compute indices of original points in a set of rotated points."}, {"gsv_set_smallest_vectors_sparse", py_gsv_set_smallest_vectors_sparse, METH_VARARGS, "Set shortest vectors in sparse array."}, {"gsv_set_smallest_vectors_dense", py_gsv_set_smallest_vectors_dense, METH_VARARGS, "Set shortest vectors in dense array."}, {"tetrahedra_relative_grid_address", py_thm_relative_grid_address, METH_VARARGS, 
"Relative grid addresses of vertices of 24 tetrahedra"}, {"all_tetrahedra_relative_grid_address", py_thm_all_relative_grid_address, METH_VARARGS, "4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"}, {"tetrahedra_integration_weight", py_thm_integration_weight, METH_VARARGS, "Integration weight for tetrahedron method"}, {"tetrahedra_integration_weight_at_omegas", py_thm_integration_weight_at_omegas, METH_VARARGS, "Integration weight for tetrahedron method at omegas"}, {"tetrahedra_frequencies", py_get_tetrahedra_frequenies, METH_VARARGS, "Run tetrahedron method"}, {"tetrahedron_method_dos", py_tetrahedron_method_dos, METH_VARARGS, "Run tetrahedron method"}, {NULL, NULL, 0, NULL}}; #if PY_MAJOR_VERSION >= 3 static int _phonopy_traverse(PyObject* m, visitproc visit, void* arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int _phonopy_clear(PyObject* m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_phonopy", NULL, sizeof(struct module_state), _phonopy_methods, NULL, _phonopy_traverse, _phonopy_clear, NULL}; #define INITERROR return NULL PyObject* PyInit__phonopy(void) #else #define INITERROR return void init_phonopy(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject* module = PyModule_Create(&moduledef); #else PyObject* module = Py_InitModule("_phonopy", _phonopy_methods); #endif struct module_state* st; if (module == NULL) INITERROR; st = GETSTATE(module); st->error = PyErr_NewException("_phonopy.Error", NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } #if PY_MAJOR_VERSION >= 3 return module; #endif } static PyObject* py_transform_dynmat_to_fc(PyObject* self, PyObject* args) { PyArrayObject* py_force_constants; PyArrayObject* py_dynamical_matrices; PyArrayObject* py_commensurate_points; PyArrayObject* py_svecs; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2pp_map; PyArrayObject* py_fc_index_map; double* fc; double* dm; 
double(*comm_points)[3]; double(*svecs)[3]; double* masses; long(*multi)[2]; long* s2pp_map; long* fc_index_map; long num_patom; long num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_force_constants, &py_dynamical_matrices, &py_commensurate_points, &py_svecs, &py_multi, &py_masses, &py_s2pp_map, &py_fc_index_map)) { return NULL; } fc = (double*)PyArray_DATA(py_force_constants); dm = (double*)PyArray_DATA(py_dynamical_matrices); comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points); svecs = (double(*)[3])PyArray_DATA(py_svecs); masses = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2pp_map = (long*)PyArray_DATA(py_s2pp_map); fc_index_map = (long*)PyArray_DATA(py_fc_index_map); num_patom = PyArray_DIMS(py_multi)[1]; num_satom = PyArray_DIMS(py_multi)[0]; phpy_transform_dynmat_to_fc(fc, dm, comm_points, svecs, multi, masses, s2pp_map, fc_index_map, num_patom, num_satom); Py_RETURN_NONE; } static PyObject* py_compute_permutation(PyObject* self, PyObject* args) { PyArrayObject* permutation; PyArrayObject* lattice; PyArrayObject* positions; PyArrayObject* permuted_positions; double symprec; int* rot_atoms; double(*lat)[3]; double(*pos)[3]; double(*rot_pos)[3]; int num_pos; int is_found; if (!PyArg_ParseTuple(args, "OOOOd", &permutation, &lattice, &positions, &permuted_positions, &symprec)) { return NULL; } rot_atoms = (int*)PyArray_DATA(permutation); lat = (double(*)[3])PyArray_DATA(lattice); pos = (double(*)[3])PyArray_DATA(positions); rot_pos = (double(*)[3])PyArray_DATA(permuted_positions); num_pos = PyArray_DIMS(positions)[0]; is_found = phpy_compute_permutation(rot_atoms, lat, pos, rot_pos, num_pos, symprec); if (is_found) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } static PyObject* py_gsv_set_smallest_vectors_sparse(PyObject* self, PyObject* args) { PyArrayObject* py_smallest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_pos_to; PyArrayObject* py_pos_from; PyArrayObject* py_lattice_points; 
PyArrayObject* py_reduced_basis; PyArrayObject* py_trans_mat; double symprec; double(*smallest_vectors)[27][3]; int* multiplicity; double(*pos_to)[3]; double(*pos_from)[3]; int(*lattice_points)[3]; double(*reduced_basis)[3]; int(*trans_mat)[3]; int num_pos_to, num_pos_from, num_lattice_points; if (!PyArg_ParseTuple(args, "OOOOOOOd", &py_smallest_vectors, &py_multiplicity, &py_pos_to, &py_pos_from, &py_lattice_points, &py_reduced_basis, &py_trans_mat, &symprec)) { return NULL; } smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors); multiplicity = (int*)PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points); num_lattice_points = PyArray_DIMS(py_lattice_points)[0]; reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis); trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat); phpy_set_smallest_vectors_sparse(smallest_vectors, multiplicity, pos_to, num_pos_to, pos_from, num_pos_from, lattice_points, num_lattice_points, reduced_basis, trans_mat, symprec); Py_RETURN_NONE; } static PyObject* py_gsv_set_smallest_vectors_dense(PyObject* self, PyObject* args) { PyArrayObject* py_smallest_vectors; PyArrayObject* py_multiplicity; PyArrayObject* py_pos_to; PyArrayObject* py_pos_from; PyArrayObject* py_lattice_points; PyArrayObject* py_reduced_basis; PyArrayObject* py_trans_mat; long initialize; double symprec; double(*smallest_vectors)[3]; long(*multiplicity)[2]; double(*pos_to)[3]; double(*pos_from)[3]; long(*lattice_points)[3]; double(*reduced_basis)[3]; long(*trans_mat)[3]; long num_pos_to, num_pos_from, num_lattice_points; if (!PyArg_ParseTuple(args, "OOOOOOOld", &py_smallest_vectors, &py_multiplicity, &py_pos_to, &py_pos_from, &py_lattice_points, &py_reduced_basis, &py_trans_mat, &initialize, &symprec)) { return NULL; } smallest_vectors 
= (double(*)[3])PyArray_DATA(py_smallest_vectors); multiplicity = (long(*)[2])PyArray_DATA(py_multiplicity); pos_to = (double(*)[3])PyArray_DATA(py_pos_to); pos_from = (double(*)[3])PyArray_DATA(py_pos_from); num_pos_to = PyArray_DIMS(py_pos_to)[0]; num_pos_from = PyArray_DIMS(py_pos_from)[0]; lattice_points = (long(*)[3])PyArray_DATA(py_lattice_points); num_lattice_points = PyArray_DIMS(py_lattice_points)[0]; reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis); trans_mat = (long(*)[3])PyArray_DATA(py_trans_mat); phpy_set_smallest_vectors_dense( smallest_vectors, multiplicity, pos_to, num_pos_to, pos_from, num_pos_from, lattice_points, num_lattice_points, reduced_basis, trans_mat, initialize, symprec); Py_RETURN_NONE; } static PyObject* py_perm_trans_symmetrize_fc(PyObject* self, PyObject* args) { PyArrayObject* py_fc; double* fc; int level; int n_satom; if (!PyArg_ParseTuple(args, "Oi", &py_fc, &level)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); n_satom = PyArray_DIMS(py_fc)[0]; phpy_perm_trans_symmetrize_fc(fc, n_satom, level); Py_RETURN_NONE; } static PyObject* py_perm_trans_symmetrize_compact_fc(PyObject* self, PyObject* args) { PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; int level; double* fc; int* perms; int* s2pp; int* p2s; int* nsym_list; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOOi", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list, &level)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; phpy_perm_trans_symmetrize_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, level); Py_RETURN_NONE; } static PyObject* py_transpose_compact_fc(PyObject* self, PyObject* args) { 
PyArrayObject* py_fc; PyArrayObject* py_permutations; PyArrayObject* py_s2pp_map; PyArrayObject* py_p2s_map; PyArrayObject* py_nsym_list; double* fc; int* s2pp; int* p2s; int* nsym_list; int* perms; int n_patom, n_satom; if (!PyArg_ParseTuple(args, "OOOOO", &py_fc, &py_permutations, &py_s2pp_map, &py_p2s_map, &py_nsym_list)) { return NULL; } fc = (double*)PyArray_DATA(py_fc); perms = (int*)PyArray_DATA(py_permutations); s2pp = (int*)PyArray_DATA(py_s2pp_map); p2s = (int*)PyArray_DATA(py_p2s_map); nsym_list = (int*)PyArray_DATA(py_nsym_list); n_patom = PyArray_DIMS(py_fc)[0]; n_satom = PyArray_DIMS(py_fc)[1]; phpy_set_index_permutation_symmetry_compact_fc(fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, 1); Py_RETURN_NONE; } static PyObject* py_get_dynamical_matrix(PyObject* self, PyObject* args) { PyArrayObject* py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_q; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; double* dm; double* fc; double* q; double(*svecs)[3]; double* m; long(*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dynamical_matrix, &py_force_constants, &py_q, &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map)) { return NULL; } dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[3])PyArray_DATA(py_svecs); m = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; phpy_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, NULL, 1); Py_RETURN_NONE; } static PyObject* py_get_nac_dynamical_matrix(PyObject* self, PyObject* args) { PyArrayObject* 
py_dynamical_matrix; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_q_cart; PyArrayObject* py_q; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; double factor; double* dm; double* fc; double* q_cart; double* q; double(*svecs)[3]; double* m; double(*born)[3][3]; long(*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; long n; double(*charge_sum)[3][3]; if (!PyArg_ParseTuple(args, "OOOOOOOOOOd", &py_dynamical_matrix, &py_force_constants, &py_q, &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map, &py_q_cart, &py_born, &factor)) return NULL; dm = (double*)PyArray_DATA(py_dynamical_matrix); fc = (double*)PyArray_DATA(py_force_constants); q_cart = (double*)PyArray_DATA(py_q_cart); q = (double*)PyArray_DATA(py_q); svecs = (double(*)[3])PyArray_DATA(py_svecs); m = (double*)PyArray_DATA(py_masses); born = (double(*)[3][3])PyArray_DATA(py_born); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; charge_sum = (double(*)[3][3])malloc(sizeof(double[3][3]) * num_patom * num_patom); n = num_satom / num_patom; phpy_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born); phpy_get_dynamical_matrix_at_q(dm, num_patom, num_satom, fc, q, svecs, multi, m, s2p_map, p2s_map, charge_sum, 1); free(charge_sum); Py_RETURN_NONE; } static PyObject* py_get_recip_dipole_dipole(PyObject* self, PyObject* args) { PyArrayObject* py_dd; PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_q_cart; PyArrayObject* py_q_direction; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double factor; double lambda; double tolerance; double* dd; double* dd_q0; double(*G_list)[3]; double* q_vector; double* q_direction; double(*born)[3][3]; 
double(*dielectric)[3]; double(*pos)[3]; long num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOOOOddd", &py_dd, &py_dd_q0, &py_G_list, &py_q_cart, &py_q_direction, &py_born, &py_dielectric, &py_positions, &factor, &lambda, &tolerance)) return NULL; dd = (double*)PyArray_DATA(py_dd); dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); if ((PyObject*)py_q_direction == Py_None) { q_direction = NULL; } else { q_direction = (double*)PyArray_DATA(py_q_direction); } q_vector = (double*)PyArray_DATA(py_q_cart); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; phpy_get_recip_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */ dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* [num_kvec, 3] */ num_G, num_patom, q_vector, q_direction, born, dielectric, pos, /* [natom, 3] */ factor, /* 4pi/V*unit-conv */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject* py_get_recip_dipole_dipole_q0(PyObject* self, PyObject* args) { PyArrayObject* py_dd_q0; PyArrayObject* py_G_list; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_positions; double lambda; double tolerance; double* dd_q0; double(*G_list)[3]; double(*born)[3][3]; double(*dielectric)[3]; double(*pos)[3]; long num_patom, num_G; if (!PyArg_ParseTuple(args, "OOOOOdd", &py_dd_q0, &py_G_list, &py_born, &py_dielectric, &py_positions, &lambda, &tolerance)) return NULL; dd_q0 = (double*)PyArray_DATA(py_dd_q0); G_list = (double(*)[3])PyArray_DATA(py_G_list); born = (double(*)[3][3])PyArray_DATA(py_born); dielectric = (double(*)[3])PyArray_DATA(py_dielectric); pos = (double(*)[3])PyArray_DATA(py_positions); num_G = PyArray_DIMS(py_G_list)[0]; num_patom = PyArray_DIMS(py_positions)[0]; phpy_get_recip_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */ G_list, /* 
[num_kvec, 3] */ num_G, num_patom, born, dielectric, pos, /* [natom, 3] */ lambda, /* 4 * Lambda^2 */ tolerance); Py_RETURN_NONE; } static PyObject* py_get_derivative_dynmat(PyObject* self, PyObject* args) { PyArrayObject* py_derivative_dynmat; PyArrayObject* py_force_constants; PyArrayObject* py_svecs; PyArrayObject* py_lattice; PyArrayObject* py_q_vector; PyArrayObject* py_multi; PyArrayObject* py_masses; PyArrayObject* py_s2p_map; PyArrayObject* py_p2s_map; PyArrayObject* py_born; PyArrayObject* py_dielectric; PyArrayObject* py_q_direction; double nac_factor; double* ddm; double* fc; double* q_vector; double* lat; double(*svecs)[3]; double* masses; long(*multi)[2]; long* s2p_map; long* p2s_map; long num_patom; long num_satom; double* born; double* epsilon; double* q_dir; if (!PyArg_ParseTuple( args, "OOOOOOOOOdOOO", &py_derivative_dynmat, &py_force_constants, &py_q_vector, &py_lattice, /* column vectors */ &py_svecs, &py_multi, &py_masses, &py_s2p_map, &py_p2s_map, &nac_factor, &py_born, &py_dielectric, &py_q_direction)) { return NULL; } ddm = (double*)PyArray_DATA(py_derivative_dynmat); fc = (double*)PyArray_DATA(py_force_constants); q_vector = (double*)PyArray_DATA(py_q_vector); lat = (double*)PyArray_DATA(py_lattice); svecs = (double(*)[3])PyArray_DATA(py_svecs); masses = (double*)PyArray_DATA(py_masses); multi = (long(*)[2])PyArray_DATA(py_multi); s2p_map = (long*)PyArray_DATA(py_s2p_map); p2s_map = (long*)PyArray_DATA(py_p2s_map); num_patom = PyArray_DIMS(py_p2s_map)[0]; num_satom = PyArray_DIMS(py_s2p_map)[0]; if ((PyObject*)py_born == Py_None) { born = NULL; } else { born = (double*)PyArray_DATA(py_born); } if ((PyObject*)py_dielectric == Py_None) { epsilon = NULL; } else { epsilon = (double*)PyArray_DATA(py_dielectric); } if ((PyObject*)py_q_direction == Py_None) { q_dir = NULL; } else { q_dir = (double*)PyArray_DATA(py_q_direction); } phpy_get_derivative_dynmat_at_q(ddm, num_patom, num_satom, fc, q_vector, lat, svecs, multi, masses, s2p_map, p2s_map, 
nac_factor, born, epsilon, q_dir); Py_RETURN_NONE; } /* Thermal properties */ static PyObject* py_get_thermal_properties(PyObject* self, PyObject* args) { PyArrayObject* py_thermal_props; PyArrayObject* py_temperatures; PyArrayObject* py_frequencies; PyArrayObject* py_weights; double cutoff_frequency; double* temperatures; double* freqs; double* thermal_props; long* weights; long num_qpoints; long num_bands; long num_temp; if (!PyArg_ParseTuple(args, "OOOOd", &py_thermal_props, &py_temperatures, &py_frequencies, &py_weights, &cutoff_frequency)) { return NULL; } thermal_props = (double*)PyArray_DATA(py_thermal_props); temperatures = (double*)PyArray_DATA(py_temperatures); num_temp = (long)PyArray_DIMS(py_temperatures)[0]; freqs = (double*)PyArray_DATA(py_frequencies); num_qpoints = (long)PyArray_DIMS(py_frequencies)[0]; weights = (long*)PyArray_DATA(py_weights); num_bands = (long)PyArray_DIMS(py_frequencies)[1]; phpy_get_thermal_properties(thermal_props, temperatures, freqs, weights, num_temp, num_qpoints, num_bands, cutoff_frequency); Py_RETURN_NONE; } static PyObject* py_distribute_fc2(PyObject* self, PyObject* args) { PyArrayObject* py_force_constants; PyArrayObject* py_permutations; PyArrayObject* py_map_atoms; PyArrayObject* py_map_syms; PyArrayObject* py_atom_list; PyArrayObject* py_rotations_cart; double(*r_carts)[3][3]; double(*fc2)[3][3]; int* permutations; int* map_atoms; int* map_syms; int* atom_list; npy_intp num_pos, num_rot, len_atom_list; if (!PyArg_ParseTuple(args, "OOOOOO", &py_force_constants, &py_atom_list, &py_rotations_cart, &py_permutations, &py_map_atoms, &py_map_syms)) { return NULL; } fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants); atom_list = (int*)PyArray_DATA(py_atom_list); len_atom_list = PyArray_DIMS(py_atom_list)[0]; permutations = (int*)PyArray_DATA(py_permutations); map_atoms = (int*)PyArray_DATA(py_map_atoms); map_syms = (int*)PyArray_DATA(py_map_syms); r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart); num_rot = 
PyArray_DIMS(py_permutations)[0]; /* completes: num_rot = ... */
    num_pos = PyArray_DIMS(py_permutations)[1];
    /* Validate shapes up front so the C kernel never indexes out of
       bounds: both maps must be 1-D of length num_pos, and there must be
       exactly one Cartesian rotation per permutation. */
    if (PyArray_NDIM(py_map_atoms) != 1 ||
        PyArray_DIMS(py_map_atoms)[0] != num_pos) {
        PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms");
        return NULL;
    }
    if (PyArray_NDIM(py_map_syms) != 1 ||
        PyArray_DIMS(py_map_syms)[0] != num_pos) {
        PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms");
        return NULL;
    }
    if (PyArray_DIMS(py_rotations_cart)[0] != num_rot) {
        PyErr_SetString(PyExc_ValueError,
                        "permutations and rotations are different length");
        return NULL;
    }
    phpy_distribute_fc2(fc2, atom_list, len_atom_list, r_carts, permutations,
                        map_atoms, map_syms, num_rot, num_pos);
    Py_RETURN_NONE;
}

/* Fill the relative grid addresses of the vertices of the 24 tetrahedra
   around a grid point, oriented by the reciprocal lattice. */
static PyObject* py_thm_relative_grid_address(PyObject* self, PyObject* args) {
    PyArrayObject* py_relative_grid_address;
    PyArrayObject* py_reciprocal_lattice_py;

    long(*relative_grid_address)[4][3];
    double(*reciprocal_lattice)[3];

    if (!PyArg_ParseTuple(args, "OO", &py_relative_grid_address,
                          &py_reciprocal_lattice_py)) {
        return NULL;
    }

    relative_grid_address =
        (long(*)[4][3])PyArray_DATA(py_relative_grid_address);
    reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py);

    phpy_get_relative_grid_address(relative_grid_address, reciprocal_lattice);

    Py_RETURN_NONE;
}

/* Fill all 4 orientation sets of relative grid addresses of the vertices
   of the 24 tetrahedra; no lattice argument needed. */
static PyObject* py_thm_all_relative_grid_address(PyObject* self,
                                                  PyObject* args) {
    PyArrayObject* py_relative_grid_address;

    long(*relative_grid_address)[24][4][3];

    if (!PyArg_ParseTuple(args, "O", &py_relative_grid_address)) {
        return NULL;
    }

    relative_grid_address =
        (long(*)[24][4][3])PyArray_DATA(py_relative_grid_address);

    phpy_get_all_relative_grid_address(relative_grid_address);

    Py_RETURN_NONE;
}

/* Scalar tetrahedron-method integration weight at frequency omega.
   function[0] selects the weight function; its semantics are defined by
   phpy_get_integration_weight -- confirm there before changing. */
static PyObject* py_thm_integration_weight(PyObject* self, PyObject* args) {
    double omega;
    PyArrayObject* py_tetrahedra_omegas;
    char* function;

    double(*tetrahedra_omegas)[4];
    double iw;

    if (!PyArg_ParseTuple(args, "dOs", &omega, &py_tetrahedra_omegas,
                          &function)) {
        return NULL;
    }

    tetrahedra_omegas =
(double(*)[4])PyArray_DATA(py_tetrahedra_omegas); iw = phpy_get_integration_weight(omega, tetrahedra_omegas, function[0]); return PyFloat_FromDouble(iw); } static PyObject* py_thm_integration_weight_at_omegas(PyObject* self, PyObject* args) { PyArrayObject* py_integration_weights; PyArrayObject* py_omegas; PyArrayObject* py_tetrahedra_omegas; char* function; double* omegas; double* iw; long num_omegas; double(*tetrahedra_omegas)[4]; long i; if (!PyArg_ParseTuple(args, "OOOs", &py_integration_weights, &py_omegas, &py_tetrahedra_omegas, &function)) { return NULL; } omegas = (double*)PyArray_DATA(py_omegas); iw = (double*)PyArray_DATA(py_integration_weights); num_omegas = (long)PyArray_DIMS(py_omegas)[0]; tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas); #pragma omp parallel for for (i = 0; i < num_omegas; i++) { iw[i] = phpy_get_integration_weight(omegas[i], tetrahedra_omegas, function[0]); } Py_RETURN_NONE; } static PyObject* py_get_tetrahedra_frequenies(PyObject* self, PyObject* args) { PyArrayObject* py_freq_tetras; PyArrayObject* py_grid_points; PyArrayObject* py_mesh; PyArrayObject* py_grid_address; PyArrayObject* py_gp_ir_index; PyArrayObject* py_relative_grid_address; PyArrayObject* py_frequencies; double* freq_tetras; long* grid_points; long* mesh; long(*grid_address)[3]; long* gp_ir_index; long(*relative_grid_address)[3]; double* frequencies; long num_gp_in, num_band; if (!PyArg_ParseTuple(args, "OOOOOOO", &py_freq_tetras, &py_grid_points, &py_mesh, &py_grid_address, &py_gp_ir_index, &py_relative_grid_address, &py_frequencies)) { return NULL; } freq_tetras = (double*)PyArray_DATA(py_freq_tetras); grid_points = (long*)PyArray_DATA(py_grid_points); num_gp_in = PyArray_DIMS(py_grid_points)[0]; mesh = (long*)PyArray_DATA(py_mesh); grid_address = (long(*)[3])PyArray_DATA(py_grid_address); gp_ir_index = (long*)PyArray_DATA(py_gp_ir_index); relative_grid_address = (long(*)[3])PyArray_DATA(py_relative_grid_address); frequencies = 
(double*)PyArray_DATA(py_frequencies); num_band = PyArray_DIMS(py_frequencies)[1]; phpy_get_tetrahedra_frequenies(freq_tetras, mesh, grid_points, grid_address, relative_grid_address, gp_ir_index, frequencies, num_band, num_gp_in); Py_RETURN_NONE; } static PyObject* py_tetrahedron_method_dos(PyObject* self, PyObject* args) { PyArrayObject* py_dos; PyArrayObject* py_mesh; PyArrayObject* py_freq_points; PyArrayObject* py_frequencies; PyArrayObject* py_coef; PyArrayObject* py_grid_address; PyArrayObject* py_grid_mapping_table; PyArrayObject* py_relative_grid_address; double* dos; long* mesh; double* freq_points; double* frequencies; double* coef; long(*grid_address)[3]; long num_gp, num_ir_gp, num_band, num_freq_points, num_coef; long* grid_mapping_table; long(*relative_grid_address)[4][3]; if (!PyArg_ParseTuple(args, "OOOOOOOO", &py_dos, &py_mesh, &py_freq_points, &py_frequencies, &py_coef, &py_grid_address, &py_grid_mapping_table, &py_relative_grid_address)) { return NULL; } /* dos[num_ir_gp][num_band][num_freq_points][num_coef] */ dos = (double*)PyArray_DATA(py_dos); mesh = (long*)PyArray_DATA(py_mesh); freq_points = (double*)PyArray_DATA(py_freq_points); num_freq_points = (long)PyArray_DIMS(py_freq_points)[0]; frequencies = (double*)PyArray_DATA(py_frequencies); num_ir_gp = (long)PyArray_DIMS(py_frequencies)[0]; num_band = (long)PyArray_DIMS(py_frequencies)[1]; coef = (double*)PyArray_DATA(py_coef); num_coef = (long)PyArray_DIMS(py_coef)[1]; grid_address = (long(*)[3])PyArray_DATA(py_grid_address); num_gp = (long)PyArray_DIMS(py_grid_address)[0]; grid_mapping_table = (long*)PyArray_DATA(py_grid_mapping_table); relative_grid_address = (long(*)[4][3])PyArray_DATA(py_relative_grid_address); phpy_tetrahedron_method_dos(dos, mesh, grid_address, relative_grid_address, grid_mapping_table, freq_points, frequencies, coef, num_freq_points, num_ir_gp, num_band, num_coef, num_gp); Py_RETURN_NONE; }
/* draw.c -- MagickCore image drawing methods (following section of this concatenated source) */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 2053 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(image,token) \ { \ (void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo **primitive_info; size_t *extent; ssize_t offset; PointInfo point; ExceptionInfo *exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static Image *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *), RenderMVGContent(Image *,const DrawInfo *,const size_t), TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(MVGInfo *,const size_t), TraceCircle(MVGInfo *,const PointInfo,const PointInfo), TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *); static ssize_t TracePath(Image *,MVGInfo *,const char *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure 
properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); if (draw_info->id != (char *) NULL) (void) CloneString(&clone_info->id,draw_info->id); if (draw_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->compliance=draw_info->compliance; clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, &draw_info->fill_pattern->exception); else if (draw_info->tile != (Image *) 
NULL) clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue, &draw_info->tile->exception); clone_info->tile=NewImageList(); /* tile is deprecated */ if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,&draw_info->stroke_pattern->exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { 
register ssize_t x; /* inside CloneDrawInfo: deep copy of dash_pattern */

      /* dash_pattern is terminated by a (near-)zero sentinel: count the
         entries up to it. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      /* NOTE(review): 2*x+2 elements are allocated but only x+1 copied;
         presumably the extra zeroed space lets an odd-length pattern be
         doubled later -- confirm against the dash-stroking code. */
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /* Deep-copy the gradient stop array so clone and original do not
         share storage. */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): message tag appears copy-pasted from the
           dash-pattern branch above; a gradient-specific tag may not
           exist in the message table -- verify before changing it. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_opacity=draw_info->fill_opacity;
  clone_info->stroke_opacity=draw_info->stroke_opacity;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,&draw_info->clipping_mask->exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,&draw_info->composite_mask->exception);
  clone_info->render=draw_info->render;
  /* The debug flag is re-derived from the current logging configuration,
     not copied from the source draw_info. */
  clone_info->debug=IsEventLogging();
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t   P a t h   T o   P o l y g o n                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPathToPolygon() returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o path_info: Specifies a pointer to a PathInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyPolygonInfo() releases every edge's point array, the edge table,
  and the PolygonInfo structure itself.  Always returns NULL, so callers can
  write `polygon_info=DestroyPolygonInfo(polygon_info);`.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      /* Free each edge's point array before the edge table itself. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        if (polygon_info->edges[i].points != (PointInfo *) NULL)
          polygon_info->edges[i].points=(PointInfo *)
            RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  DrawCompareEdges() is the qsort() comparator that orders edges for
  scanline rendering.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  /* Helper: return -1/1 on strict inequality, otherwise fall through. */
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  /* Primary key: start y, then start x. */
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  /* Tie-break on orientation: cross product of the two edge directions. */
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/*
  LogPolygonInfo() writes every edge (direction, ghostline flag, bounds and
  point list) of the polygon to the drawing debug log.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}

/*
  ReversePoints() reverses an array of points in place; with an odd count the
  middle element stays put.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,          /* -1 down, +1 up, 0 not yet known */
    next_direction;

  PointInfo
    point,
    *points;            /* points of the edge currently being accumulated */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;                  /* number of points accumulated in the current edge */

  MagickBooleanType
    ghostline;

  size_t
    edge,               /* index of the edge currently being filled in */
    number_edges,       /* capacity of the edge table */
    number_points;      /* capacity of the current point array */

  /*
    Convert a path to the more efficient sorted rendering form.
*/ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PolygonInfo *) NULL); } number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. 
*/ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); points=(PointInfo *) RelinquishMagickMemory(points); return(DestroyPolygonInfo(polygon_info)); } } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; polygon_info->number_edges=edge; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); points=(PointInfo *) RelinquishMagickMemory(points); return(DestroyPolygonInfo(polygon_info)); } } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; polygon_info->number_edges=edge+1; points=(PointInfo *) NULL; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo 
*) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; polygon_info->number_edges=edge; } } polygon_info->number_edges=edge; polygon_info->number_edges=edge; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges, polygon_info->number_edges,sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { EdgeInfo *edge_info; edge_info=polygon_info->edges+i; edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points, edge_info->number_points,sizeof(*edge_info->points)); if (edge_info->points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonInfo(polygon_info)); } } qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),DrawCompareEdges); if (IsEventLogging() != MagickFalse) 
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t   P r i m i t i v e   T o   P a t h                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPrimitiveToPath() returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/

/*
  LogPathInfo() writes each path element (point and code) to the drawing
  debug log.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,                  /* first point of the current subpath */
    q;                  /* most recently emitted point */

  register ssize_t
    i,
    n;                  /* number of path elements emitted so far */

  ssize_t
    coordinates,        /* points remaining in the current subpath */
    start;              /* index of the current subpath's first element */

  magick_unreferenced(draw_info);

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* These primitives have no path representation. */
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Count the coordinates; worst case each point yields 3 path elements. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    /* Close the open subpath with a ghostline back to its first point. */
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Trim the allocation to the elements actually emitted. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /* Release every owned string/image member before the structure itself. */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->tile != (Image *) NULL)
    draw_info->tile=DestroyImage(draw_info->tile);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Invalidate the signature to catch use-after-free of this structure. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
*/

/*
  AffineEdge() clips the horizontal span *edge at scanline y against the
  affine projection of the source image, returning the clipped extent.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* Negative x scale: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* Degenerate: scanline misses the source; return an empty span. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  InverseAffineMatrix() returns the inverse of the given affine transform;
  PerceptibleReciprocal() guards against a singular (zero-determinant) matrix.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    extent[4],          /* the source's four corners, transformed */
    min,
    max,
    point;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  /* Map the corners forward through the affine transform. */
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* Clamp the transformed bounding box to the destination image. */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetMagickPixelPacket(image,&zero);
  exception=(&image->exception);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    MagickPixelPacket
      composite,
      pixel;

    PointInfo
      point;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    if (status == MagickFalse)
      continue;
    /* Clip this scanline against the projected source extent. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5));
         x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      /* Map the destination pixel back into source coordinates. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolateMagickPixelPacket(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
      MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
        composite.opacity,&composite);
      SetPixelPacket(image,&composite,q,indexes+x_offset);
      x_offset++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   B o u n d i n g   R e c t a n g l e s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* Default to 96 DPI unless the draw info carries a density geometry. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* Half the effective stroke width, used to pad rectangle bounds. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, padded by mid and clamped to the image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* One rectangle per edge: red for "down" edges, green for "up". */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorDatabase("#f00",&clone_info->stroke,
            &image->exception);
        else
          status=QueryColorDatabase("#0f0",&clone_info->stroke,
            &image->exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* Finally draw the overall bounding rectangle in blue. */
  status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C l i p   P a t h                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /* The clip path MVG is stored as an image artifact keyed by id. */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
    &image->exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageClipMask(image,clipping_mask);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C l i p p i n g   M a s k                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(clip_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /* Start from a fully transparent canvas the same size as the image. */
  status=SetImageClipMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
  status=SetImageBackgroundColor(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* Render the clip path in opaque white with no stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  /* The alpha channel becomes the mask; invert it unless SVG semantics. */
  status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
  if (draw_info->compliance != SVGCompliance)
    status&=NegateImage(clip_mask,MagickFalse);
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C o m p o s i t e   M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(composite_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* Start from a fully transparent canvas the same size as the image. */
  status=SetImageMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* Render the mask path in opaque white with no stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  status=RenderMVGContent(composite_mask,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  /* Extract the alpha channel and invert it to form the composite mask. */
  status&=SeparateImageChannel(composite_mask,TrueAlphaChannel);
  status&=NegateImage(composite_mask,MagickFalse);
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   D a s h   P o l y g o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* Worst case each vertex splits into two dash segments, plus slack. */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine); length=scale*draw_info->dash_pattern[0]; offset=fabs(draw_info->dash_offset) >= MagickEpsilon ? scale*draw_info->dash_offset : 0.0; j=1; for (n=0; offset > 0.0; j=0) { if (draw_info->dash_pattern[n] <= 0.0) break; length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5)); if (offset > length) { offset-=length; n++; length=scale*draw_info->dash_pattern[n]; continue; } if (offset < length) { length-=offset; offset=0.0; break; } offset=0.0; n++; } status=MagickTrue; maximum_length=0.0; total_length=0.0; for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (double) (MaxBezierCoordinates >> 2)) continue; if (fabs(length) < MagickEpsilon) { if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); ) { total_length+=length; if ((n & 0x01) != 0) { dash_polygon[0]=primitive_info[0]; dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); j=1; } else { if ((j+1) > (ssize_t) number_vertices) break; dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy* total_length*PerceptibleReciprocal(maximum_length)); dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon); if (status == MagickFalse) break; } if 
(fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n=0; length=scale*draw_info->dash_pattern[n]; } length-=(maximum_length-total_length); if ((n & 0x01) != 0) continue; dash_polygon[j]=primitive_info[i]; dash_polygon[j].coordinates=1; j++; } if ((status != MagickFalse) && (total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1)) { dash_polygon[j]=primitive_info[i-1]; dash_polygon[j].point.x+=MagickEpsilon; dash_polygon[j].point.y+=MagickEpsilon; dash_polygon[j].coordinates=1; j++; dash_polygon[0].coordinates=(size_t) j; dash_polygon[j].primitive=UndefinedPrimitive; status&=DrawStrokePolygon(image,clone_info,dash_polygon); } dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. 
%
*/

/*
  GetStopColorOffset() maps pixel (x,y) onto the gradient axis.  For linear
  gradients it returns the (unnormalized) projection of the pixel onto the
  gradient vector; the caller divides by the vector length.  For radial
  gradients it returns the (angle-corrected, radii-scaled) distance from the
  gradient center, except for RepeatSpread where the raw distance is used.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /* gamma = 1/(|p|*|q|), guarded against zero-length vectors */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate by the gradient angle, scale by the ellipse radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.  Each pixel in the gradient's
    bounding box is composited with the color interpolated between the two
    gradient stops that bracket its offset, honoring the spread method
    (pad, reflect, or repeat).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    MagickPixelPacket
      composite,
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      i,
      x;

    register PixelPacket
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    /* seed the offset at x == 0 for this row */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* recompute the offset except at the gradient origin pixel */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          /* clamp outside [0,1] to the first/last stop color */
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /* fold the offset into [0,1], alternating direction per period */
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  /* wrap the offset into one gradient-vector period */
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  /* antialias the seam within one pixel of the period end */
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,(double) gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,
                      (double) gradient->radius);
                  else
                    repeat=fmod(offset,(double) gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* blend first and last stops across the seam */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
        pixel.opacity,&pixel);
      SetPixelPacket(image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
*/

static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives; grow the
    primitive-info buffer when there is not.  Returns MagickFalse only when
    reallocation fails, in which case a minimal buffer is left in place so
    callers can unwind safely.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad+1;
  quantum=sizeof(**mvg_info->primitive_info);
  if (extent <= (double) *mvg_info->extent)
    return(MagickTrue);
  *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
    *mvg_info->primitive_info,(size_t) extent,quantum);
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    {
      register ssize_t
        i;

      *mvg_info->extent=(size_t) extent;
      /* mark the newly grown tail as unused */
      for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
        (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
      return(MagickTrue);
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
    NOTE(review): this guard appears unreachable if ResizeQuantumMemory
    returns NULL on failure (the pointer was just overwritten above) --
    confirm its contract before removing.
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}

/*
  GetDrawValue() parses a locale-independent double from string.  NaN or
  out-of-range results yield 0.0; note that *sentinal has already been
  advanced by InterpretLocaleValue() in that case, and the trailing
  `sentinal=q;` only reassigns the local parameter (no caller-visible
  effect).  Callers detect failure by comparing the sentinel to the input.
*/
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  char
    **magick_restrict q;

  double
    value;

  q=sentinal;
  value=InterpretLocaleValue(string,q);
  if ((IsNaN(value) != 0) || (value < -((double) SSIZE_MAX-512.0)) ||
      (value > ((double) SSIZE_MAX-512.0)))
    return(0.0);
  sentinal=q;
  return(value);
}

/*
  MVGMacroCompare() is the splay-tree key comparator: plain strcmp over the
  macro names.
*/
static int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.  Each named
    `push ... "name"` block has its body stored in a splay tree keyed by
    name (ownership of both key and value passes to the tree).  Returns
    NULL when primitive is NULL.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").  Track
              push/pop nesting (n) to find the matching pop for this block.
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

/*
  IsPoint() returns MagickTrue when the string parses as a nonzero number,
  or when the parse consumed at least one character (i.e. it only returns
  MagickFalse for input that yields ~0.0 with nothing consumed).
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=GetDrawValue(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
    MagickTrue);
}

/*
  TracePoint() stores a single-coordinate point primitive.
*/
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->point=point;
  return(MagickTrue);
}

/*
  RenderMVGContent() interprets an MVG primitive string and renders it onto
  image.  (Definition continues beyond this chunk.)
*/
static MagickBooleanType RenderMVGContent(Image *image,
  const DrawInfo *draw_info,const size_t depth)
{
#define RenderImageTag "Render/Image"

  AffineMatrix
    affine,
    current;

  char
    key[2*MaxTextExtent],
    keyword[MaxTextExtent],
    geometry[MaxTextExtent],
    name[MaxTextExtent],
    *next_token,
    pattern[MaxTextExtent],
    *primitive,
    *token;

  const char
    *q;

  double
    angle,
    coordinates,
    cursor,
    factor,
    primitive_extent;

  DrawInfo
    *clone_info,
    **graphic_context;

  MagickBooleanType
    proceed;

  MagickStatusType
    status;

  MVGInfo
    mvg_info;

  PointInfo
    point;

  PixelPacket
    start_color;

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register const char
    *p;

  register ssize_t
    i,
    x;

  SegmentInfo
    bounds;

  size_t
    extent,
    number_points;

  SplayTreeInfo
    *macros;

  ssize_t
    defsDepth,
    j,
    k,
    n,
    symbolDepth;

  TypeMetric
    metrics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)
LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel); if (status == MagickFalse) return(MagickFalse); } primitive=(char *) NULL; if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; /* Allocate primitive info memory. 
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=(&image->exception); graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MaxTextExtent; cursor=0.0; defsDepth=0; symbolDepth=0; macros=GetMVGMacros(primitive); status=QueryColorDatabase("#000000",&start_color,&image->exception); for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ if (GetNextToken(q,&q,MaxTextExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. 
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->border_color, &image->exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const 
char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,&image->exception); if (graphic_context[n]->compliance != SVGCompliance) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if 
(LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. */ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } if (LocaleCompare("currentColor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { 
primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->fill, &image->exception); if (graphic_context[n]->fill_opacity != OpaqueOpacity) graphic_context[n]->fill.opacity=ClampToQuantum( graphic_context[n]->fill_opacity); } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_opacity*=(1.0-opacity); else graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); if (graphic_context[n]->fill.opacity != TransparentOpacity) graphic_context[n]->fill.opacity=(Quantum) graphic_context[n]->fill_opacity; else graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange* opacity); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { 
status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'l': case 'L': { if 
(LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,&image->exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,graphic_context[n]->composite_mask); } break; } if (LocaleCompare("matte",keyword) == 0) { primitive_type=MattePrimitive; break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_opacity*=(1.0-opacity); graphic_context[n]->stroke_opacity*=(1.0-opacity); } else { graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),DrawError, "UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageClipMask(image,(Image *) NULL); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MaxTextExtent], name[MaxTextExtent], type[MaxTextExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MaxTextExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { RectangleInfo bounds; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); (void) GetNextToken(q,&q,extent,token); bounds.x=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.y=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if 
(*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { GradientType type; PixelPacket stop_color; (void) GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&stop_color,&image->exception); type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) 
GradientImage(image,type,PadSpread,&start_color,&stop_color); start_color=stop_color; (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->stroke, &image->exception); if (graphic_context[n]->stroke_opacity != OpaqueOpacity) graphic_context[n]->stroke.opacity=ClampToQuantum( graphic_context[n]->stroke_opacity); } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *p; p=q; (void) GetNextToken(p,&p,extent,token); if (*token == ',') (void) GetNextToken(p,&p,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(p,&p,extent,token); if (*token == ',') (void) GetNextToken(p,&p,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) 
GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_opacity*=(1.0-opacity); else graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); if (graphic_context[n]->stroke.opacity != TransparentOpacity) graphic_context[n]->stroke.opacity=(Quantum) graphic_context[n]->stroke_opacity; else graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange* opacity); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->undercolor, &image->exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. */ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); 
if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. 
*/ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(BezierQuantum*(double) primitive_info[j].coordinates); if (primitive_info[j].coordinates > (108*BezierQuantum)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) ThrowPointExpectedException(image,keyword); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case 
ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(image,&mvg_info,token); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case ColorPrimitive: case MattePrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if 
(primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info,(size_t) ExpandAffine(&graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,(size_t) graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); } status&=DrawPrimitive(image,graphic_context[n],primitive_info); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
/*
  Tail of RenderMVGContent(): release every per-render resource and translate
  the integer status into a MagickBooleanType result.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
  {
    /* TextPrimitive and ImagePrimitive entries own a heap-allocated text
       payload that must be freed before the array itself. */
    for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
      if ((primitive_info[i].primitive == TextPrimitive) ||
          (primitive_info[i].primitive == ImagePrimitive))
        if (primitive_info[i].text != (char *) NULL)
          primitive_info[i].text=DestroyString(primitive_info[i].text);
    primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
  }
primitive=DestroyString(primitive);
/* Unwind any graphic contexts still on the stack; n can be > 0 here when the
   MVG stream had unbalanced push/pop pairs. */
for ( ; n >= 0; n--)
  graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
  ThrowBinaryImageException(DrawError,
    "NonconformingDrawingPrimitiveDefinition",keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  DrawImage(): public entry point for rendering an MVG drawing primitive
  (draw_info->primitive) onto image.  Delegates to RenderMVGContent() at
  recursion depth 0; deeper depths are used internally for "use"/pattern
  macro expansion.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
  return(RenderMVGContent(image,draw_info,0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on return, the rendered pattern image.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    property[MaxTextExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern's MVG path and geometry were stashed as image artifacts
    ("<name>" and "<name>-geometry") by the "push pattern" handler; without
    both, there is nothing to render.
  */
  (void) FormatLocaleString(property,MaxTextExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MaxTextExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /* Replace any previously rendered pattern with a fresh, fully transparent
     canvas sized from the stored geometry. */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /* Render with a clone of draw_info so pattern state does not leak back to
     the caller; nested fill/stroke patterns are dropped from the clone. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
  (void) FormatLocaleString(property,MaxTextExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w P o l y g o n P r i m i t i v e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/

/*
  DestroyPolygonThreadSet(): release one PolygonInfo per potential rendering
  thread, then the holding array itself.  Always returns NULL so callers can
  write `set=DestroyPolygonThreadSet(set);`.
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  /* The set was sized from the ThreadResource limit at acquire time;
     NOTE(review): assumes the limit has not shrunk since then -- confirm. */
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}

/*
  AcquirePolygonThreadSet(): build one PolygonInfo per potential rendering
  thread.  Slot 0 is converted from the primitive's path; slots 1..n-1 are
  deep copies (each edge's point list is duplicated) so threads can scan
  edges independently.  Returns NULL after logging a ResourceLimitError on
  any allocation failure.
*/
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  /* Zero the array so DestroyPolygonThreadSet() can safely skip unfilled
     slots on the error paths below. */
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(draw_info,primitive_info,exception);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  polygon_info[0]=ConvertPathToPolygon(path_info,exception);
  if (polygon_info[0] == (PolygonInfo *) NULL)
    {
      /* NOTE(review): path_info is not relinquished on this or the later
         failure returns -- looks like a leak; confirm upstream fix. */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonThreadSet(polygon_info));
    }
  for (i=1; i < (ssize_t) number_threads; i++)
  {
    EdgeInfo
      *edge_info;

    register ssize_t
      j;

    /* Clone slot 0 into slot i: first the edge table, then each edge's
       point list. */
    polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
      sizeof(*polygon_info[i]));
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    polygon_info[i]->number_edges=0;
    edge_info=polygon_info[0]->edges;
    polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
      polygon_info[0]->number_edges,sizeof(*edge_info));
    if (polygon_info[i]->edges == (EdgeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    (void) memcpy(polygon_info[i]->edges,edge_info,
      polygon_info[0]->number_edges*sizeof(*edge_info));
    /* Clear the copied point pointers before any allocation can fail, so a
       partial clone never double-frees slot 0's point lists. */
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
      polygon_info[i]->edges[j].points=(PointInfo *) NULL;
    polygon_info[i]->number_edges=polygon_info[0]->number_edges;
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
    {
      edge_info=polygon_info[0]->edges+j;
      /* NOTE(review): element size here is sizeof(*edge_info) (EdgeInfo)
         while the memcpy below copies PointInfo elements -- over-allocates
         if EdgeInfo is larger; presumably intended to be
         sizeof(*edge_info->points).  Confirm before changing. */
      polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
        edge_info->number_points,sizeof(*edge_info));
      if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(DestroyPolygonThreadSet(polygon_info));
        }
      (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
        edge_info->number_points*sizeof(*edge_info->points));
    }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
/*
  Remove edge `edge` from the polygon's edge table: free its point array and
  close the gap with memmove.  Returns the new edge count.  Callers rely on
  the table staying sorted after removal.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
  assert(edge < (ssize_t) polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < (ssize_t) polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
  Antialiased coverage for a single pixel (x,y): returns the fill opacity in
  [0,1] and stores the stroke opacity through *stroke_opacity.  `mid` is half
  the device-space stroke width.  Mutates polygon_info: fully passed edges
  are destroyed and each edge's scanline/highwater cache is advanced, which
  is why every thread needs its own PolygonInfo copy.
*/
static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_opacity)
{
  double
    alpha,
    beta,
    distance,
    subpath_opacity;

  PointInfo
    delta;

  register EdgeInfo
    *p;

  register const PointInfo
    *q;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_opacity=0.0;
  subpath_opacity=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges are y-sorted: once an edge starts below y, the rest do too. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* Edge entirely above the current scanline — retire it. */
        (void) DestroyEdge(polygon_info,j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* highwater caches the first segment that can intersect this scanline. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge: squared distance from
        (x,y) to segment [q, q+1], split by the projection parameter beta.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Projection falls before the segment start: distance to q. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Projection falls past the segment end: distance to q+1. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular distance to the segment interior. */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* Stroke coverage ramps over a one-pixel band around width mid. */
          alpha=mid+0.5;
          if ((*stroke_opacity < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_opacity=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_opacity=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) ||
          (subpath_opacity >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_opacity=1.0;
          continue;
        }
      /* NOTE(review): redundant — the combined test above already skipped
         distance > 1.0; kept byte-for-byte. */
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_opacity < (alpha*alpha))
        subpath_opacity=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_opacity >= 1.0)
    return(1.0);
  /*
    Determine winding number: count signed crossings of edges left of (x,y).
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross product decides which side of the segment (x,y) lies on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: inside when the winding number is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_opacity);
}

/*
  Rasterize a polygon (or line) primitive onto the image with antialiased
  fill and stroke.  Scanlines are processed in parallel; each thread uses
  its own PolygonInfo copy from AcquirePolygonThreadSet().
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  const char
    *artifact;

  double
    mid;

  ExceptionInfo
    *exception;

  MagickBooleanType
    fill,
    status;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info,
    &image->exception);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  /* Flood-style methods fill; ordinary polygons fill too via fill color. */
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ?
MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /* Union of all edge bounding boxes, padded by stroke half-width below. */
  bounds=polygon_info[0]->edges[0].bounds;
  artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
  if (IsStringTrue(artifact) != MagickFalse)
    (void) DrawBoundingRectangles(image,draw_info,polygon_info[0]);
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* Pad by half the stroke width plus one pixel of antialias margin. */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* Clamp the box to the image frame. */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point: degenerate primitive, set at most one pixel.
      */
      start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
      stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
        stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for ( ; x <= stop_x; x++)
        {
          /* Only the pixel containing the primitive's point is filled. */
          if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) &&
              (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5))))
            (void) GetFillColor(draw_info,x-start_x,y-start_y,q);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line: composite per-pixel fill and stroke coverage.
  */
  start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
  stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      fill_opacity,
      stroke_opacity;

    PixelPacket
      fill_color,
      stroke_color;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
    stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.  Each thread queries its own PolygonInfo copy
        (indexed by OpenMP thread id) since GetOpacityPixel mutates it.
      */
      fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
        draw_info->fill_rule,x,y,&stroke_opacity);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* No antialias: snap coverage to hard 0/1. */
          fill_opacity=fill_opacity > 0.5 ? 1.0 : 0.0;
          stroke_opacity=stroke_opacity > 0.5 ?
1.0 : 0.0;
        }
      /* Composite fill first, then stroke on top of it. */
      (void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color);
      fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
        fill_color.opacity));
      MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
        (MagickRealType) q->opacity,q);
      (void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color);
      stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
        stroke_color.opacity));
      MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
        (MagickRealType) q->opacity,q);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/

/*
  Trace the primitive's type, anchor point, and coordinate list to the debug
  log.  Diagnostic only — no image state is touched.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  /* Indexed by PaintMethod; "?" guards out-of-range values. */
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { CacheView *image_view; ExceptionInfo *exception; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } exception=(&image->exception); status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelGray(&draw_info->fill) == MagickFalse) || (IsPixelGray(&draw_info->stroke) == MagickFalse))) status=SetImageColorspace(image,sRGBColorspace); if (draw_info->compliance == SVGCompliance) { status&=SetImageClipMask(image,draw_info->clipping_mask); status&=SetImageMask(image,draw_info->composite_mask); } x=CastDoubleToLong(ceil(primitive_info->point.x-0.5)); y=CastDoubleToLong(ceil(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelPacket target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsColorSimilar(image,q,&target) == MagickFalse) { q++; 
continue; } (void) GetFillColor(draw_info,x,y,q); q++; } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; status&=GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue); break; } case ResetMethod: { for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,q); q++; } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case MattePrimitive: { if (image->matte == MagickFalse) status&=SetImageAlphaChannel(image,OpaqueAlphaChannel); switch (primitive_info->method) { case PointMethod: default: { PixelPacket pixel; PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelPacket pixel, target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if 
(IsColorSimilar(image,q,&target) == MagickFalse) { q++; continue; } (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; status&=GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue); break; } case ResetMethod: { PixelPacket pixel; for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MaxTextExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); composite_images=(Image *) NULL; if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, &image->exception); else if (*primitive_info->text != '\0') { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); status&=SetImageInfo(clone_info,0,exception); if (clone_info->size != (char *) NULL) clone_info->size=DestroyString(clone_info->size); if 
              (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          MagickPixelPacket
            target;

          status&=GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          PixelPacket
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *magick_restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /* Read (inline or by filename), optionally resize, then composite. */
      AffineMatrix
        affine;

      char
        composite_geometry[MaxTextExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          &image->exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            status&=SetImageInfo(clone_info,0,exception);
            if (clone_info->size != (char *) NULL)
              clone_info->size=DestroyString(clone_info->size);
            if (clone_info->extract != (char *) NULL)
              clone_info->extract=DestroyString(clone_info->extract);
            /* URL-like sources keep the raw text as the filename. */
            if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
                (LocaleCompare(clone_info->magick,"mpri") == 0))
              (void) CopyMagickString(clone_info->filename,primitive_info->text,
                MagickPathExtent);
            if (*clone_info->filename != '\0')
              composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=0;
          break;
        }
      /* Only the first image of a sequence is composited. */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          char
            geometry[MaxTextExtent];

          /*
            Resize image.
          */
          (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
            primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,geometry);
        }
      if (composite_image->matte == MagickFalse)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
      if (draw_info->opacity != OpaqueOpacity)
        status&=SetImageOpacity(composite_image,draw_info->opacity);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MaxTextExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        &image->exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* Affine path handles rotation/shear; plain composite otherwise. */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine);
      else
        status&=CompositeImage(image,draw_info->compose,composite_image,
          geometry.x,geometry.y);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelPacket
        fill_color;

      PixelPacket
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      (void) GetFillColor(draw_info,x,y,&fill_color);
      MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
        (MagickRealType) q->opacity,q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /* Delegate to the annotation engine at the primitive's point. */
      char
        geometry[MaxTextExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.opacity != (Quantum) TransparentOpacity))
        {
          /*
            Draw dash polygon: fill first with the stroke suppressed, then
            render the dashes.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.opacity != (Quantum) TransparentOpacity) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          /* Round cap/join on a closed path renders fine directly. */
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageClipMask(image,(Image *) NULL);
      status&=SetImageMask(image,(Image *) NULL);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end draw-primitive");
  return(status != 0 ?
MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w S t r o k e P o l y g o n                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
%  the image while respecting the line cap and join attributes.
%
%  The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/

/*
  Cap an open subpath end with a tiny (sub-epsilon) quadrilateral centered
  on the endpoint; with the round rasterizer this renders as a round cap.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap));
}

/*
  Render the stroke of each subpath by tracing its outline polygon and
  filling that outline with the stroke color (stroke disabled on the clone
  so the outline itself is not stroked again).
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* The stroke becomes the fill of the traced outline. */
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,&clone_info->stroke_pattern->exception);
  clone_info->stroke.opacity=(Quantum) TransparentOpacity;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive;
       p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(draw_info,p,&image->exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    /* Open subpaths with round caps get explicit end caps. */
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p);
        status&=DrawRoundLinecap(image,draw_info,q);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the affine matrix to the identity transform: unit scale,
    zero rotation/shear, zero translation.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
  GetDrawInfo() initializes draw_info to default values taken from
  image_info, then applies any overrides present as image options
  (e.g. "fill", "stroke", "weight", ...).
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent stroke */
  (void) QueryColorDatabase("#000F",&draw_info->fill,exception);
  (void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->opacity=OpaqueOpacity;
  draw_info->fill_opacity=OpaqueOpacity;
  draw_info->stroke_opacity=OpaqueOpacity;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  draw_info->pointsize=12.0;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
  draw_info->border_color=clone_info->border_color;
  draw_info->compose=OverCompositeOp;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Apply per-image option overrides, when present.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->fill,exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->stroke,exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->undercolor,exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept a symbolic weight name, else fall back to a numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
  Permutate() returns the binomial coefficient n-choose-k, computed in
  floating point as prod(k+1..n)/prod(1..n-k).
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
  TracePrimitive is a collection of methods for generating graphic
  primitives such as arcs, ellipses, paths, etc.
*/

static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  /*
    Express the arc as an ellipse: center is the midpoint of the
    start/end chord and the radii are the half-spans.
  */
  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/*
  TraceArcPath() traces an SVG-style elliptical arc (endpoint
  parameterization: radii, x-axis rotation, large-arc and sweep flags)
  as a run of cubic Bezier segments appended via TraceBezier().
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* degenerate cases: coincident endpoints -> point; zero radii -> line */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* rotate the chord into the ellipse's axis-aligned frame */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /* radii too small to span the endpoints: scale them up uniformly */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      /* flag combination selects which of the two candidate centers */
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* at most a quarter turn per Bezier segment */
  arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
    MagickPI+MagickEpsilon)))));
  p=primitive_info;
  status=MagickTrue;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /* gamma is the Bezier control-point distance for this sub-arc */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* first segment starts at `start'; later ones chain from the last point */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* map the unit-frame points back through the ellipse rotation/scale */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /* rewind to the subpath head and stamp the primitive on every point */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceBezier() renders a Bezier curve of `number_coordinates' control
  points (stored at the current primitive offset) as a polyline of
  sampled points.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /* size the sampling quantum from the largest coordinate span, rejecting
     spans too large to address */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      /* release whichever allocation succeeded before reporting failure */
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may reallocate: re-derive the pointer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    /* evaluate the Bernstein-form polynomial at the current weight */
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* walk backwards stamping the primitive type on every emitted point */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

/*
  TraceCircle() traces a circle as a full-sweep ellipse whose radius is
  the distance from `start' (the center) to `end'.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

/*
  TraceEllipse() traces the arc of an ellipse (angles arc.x..arc.y in
  degrees) as a short segmented polyline.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /* pick an angular step inversely proportional to the larger radius */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (108.0*BezierQuantum))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent may reallocate: re-derive the pointer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* emit the exact endpoint so the arc terminates at angle.y */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* a full sweep returns to its starting point: mark the subpath closed */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceLine() traces a two-point line segment; coincident endpoints
  degenerate to a single point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

/*
  TracePath() parses an SVG path data string (moveto/lineto/curveto/arc/
  closepath commands) into primitive points, returning the number of
  coordinates emitted or -1 on error.  Uppercase commands use absolute
  coordinates, lowercase relative ones.
*/
static ssize_t TracePath(Image *image,MVGInfo *mvg_info,const char *path)
{
  char
    *next_token,
    token[MaxTextExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickStatusType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
        */
        do
        {
          /* parse: rx ry x-rotation large-arc-flag sweep-flag x y */
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          /* 'A' is absolute; 'a' is relative to the current point */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* horizontal line-to: only the x coordinate is supplied */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: finish the current subpath (if any) and open a new one.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* remember the subpath start for a later 'z' close */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* no preceding cubic: control points collapse to current point */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            (void) GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MaxTextExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* no preceding quadratic: control points collapse */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /* vertical line-to: only the y coordinate is supplied */
        do
        {
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MaxTextExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: emit the subpath start point and seal the subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized path command */
        ThrowPointExpectedException(image,token);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /* finish the trailing (possibly unclosed) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    /* multiple closed subpaths fill with the border method */
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return((ssize_t) number_coordinates);
}

/*
  TraceRectangle() traces an axis-aligned rectangle between two corner
  points as a five-point closed polygon.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceRoundRectangle() traces a rectangle with rounded corners: four
  quarter ellipses joined into a single closed subpath.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo
  arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* degenerate rectangle: emit nothing */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* clamp corner radii to half the rectangle extent */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner (270..360 degrees) */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner (0..90 degrees) */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner (90..180 degrees) */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner (180..270 degrees) */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* close the path back onto the subpath's first point */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceSquareLinecap() extends both ends of an open polyline outward by
  `offset' along the direction of the first/last non-degenerate segment,
  producing a square line cap.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* find the first vertex that differs from the start point */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* find the last vertex that differs from the end point */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}

static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
  if ((pad_p) > MaxBezierCoordinates) \
    stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
  else \
    if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
      { \
        if (~extent_p < (pad_p)) \
          stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
        else \
          { \
            extent_p+=(pad_p); \
            stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
              MaxStrokePad,sizeof(*stroke_p)); \
} \ } \ if ((pad_q) > MaxBezierCoordinates) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ (void) ThrowMagickException(exception,GetMagickModule(), \ ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. 
*/ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x; offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y; closed_path=(fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(MaxStrokePad,MaxStrokePad); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++]=box_q[1]; 
stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.q- theta.p)/(2.0*sqrt((double) (1.0/mid)))))); CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { 
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p- theta.q)/(2.0*sqrt((double) (1.0/mid)))))); CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
ast-dump-openmp-target-enter-data.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(int x) { #pragma omp target enter data map(to \ : x) } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-enter-data.c:3:1, line:6:1> line:3:6 test 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1> // CHECK-NEXT: `-OMPTargetEnterDataDirective {{.*}} <line:4:1, line:5:39> openmp_standalone_directive // CHECK-NEXT: |-OMPMapClause {{.*}} <line:4:31, line:5:38> // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:37> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:4:1> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CompoundStmt {{.*}} <col:1> // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-enter-data.c:4:1) *const restrict'
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/ASTContext.h" #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// Representation of an OpenMP canonical loop. /// /// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form /// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form /// OpenMP 4.0, section 2.6 Canonical Loop Form /// OpenMP 4.5, section 2.6 Canonical Loop Form /// OpenMP 5.0, section 2.9.1 Canonical Loop Form /// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form /// /// An OpenMP canonical loop is a for-statement or range-based for-statement /// with additional requirements that ensure that the number of iterations is /// known before entering the loop and allow skipping to an arbitrary iteration. 
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is /// known to fulfill OpenMP's canonical loop requirements because of being /// associated to an OMPLoopBasedDirective. That is, the general structure is: /// /// OMPLoopBasedDirective /// [`- CapturedStmt ] /// [ `- CapturedDecl] /// ` OMPCanonicalLoop /// `- ForStmt/CXXForRangeStmt /// `- Stmt /// /// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some /// directives such as OMPParallelForDirective, but others do not need them /// (such as OMPTileDirective). In The OMPCanonicalLoop and /// ForStmt/CXXForRangeStmt pair is repeated for loop associated with the /// directive. A OMPCanonicalLoop must not appear in the AST unless associated /// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the /// OMPCanonicalLoop may also be wrapped in a CompoundStmt: /// /// [...] /// ` OMPCanonicalLoop /// `- ForStmt/CXXForRangeStmt /// `- CompoundStmt /// |- Leading in-between code (if any) /// |- OMPCanonicalLoop /// | `- ForStmt/CXXForRangeStmt /// | `- ... /// `- Trailing in-between code (if any) /// /// The leading/trailing in-between code must not itself be a OMPCanonicalLoop /// to avoid confusion which loop belongs to the nesting. /// /// There are three different kinds of iteration variables for different /// purposes: /// * Loop user variable: The user-accessible variable with different value for /// each iteration. /// * Loop iteration variable: The variable used to identify a loop iteration; /// for range-based for-statement, this is the hidden iterator '__begin'. For /// other loops, it is identical to the loop user variable. Must be a /// random-access iterator, pointer or integer type. /// * Logical iteration counter: Normalized loop counter starting at 0 and /// incrementing by one at each iteration. 
Allows abstracting over the type /// of the loop iteration variable and is always an unsigned integer type /// appropriate to represent the range of the loop iteration variable. Its /// value corresponds to the logical iteration number in the OpenMP /// specification. /// /// This AST node provides two captured statements: /// * The distance function which computes the number of iterations. /// * The loop user variable function that computes the loop user variable when /// given a logical iteration number. /// /// These captured statements provide the link between C/C++ semantics and the /// logical iteration counters used by the OpenMPIRBuilder which is /// language-agnostic and therefore does not know e.g. how to advance a /// random-access iterator. The OpenMPIRBuilder will use this information to /// apply simd, workshare-loop, distribute, taskloop and loop directives to the /// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an /// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an /// OMPLoopDirective and skipped when searching for the associated syntactical /// loop. /// /// Example: /// <code> /// std::vector<std::string> Container{1,2,3}; /// for (std::string Str : Container) /// Body(Str); /// </code> /// which is syntactic sugar for approximately: /// <code> /// auto &&__range = Container; /// auto __begin = std::begin(__range); /// auto __end = std::end(__range); /// for (; __begin != __end; ++__begin) { /// std::String Str = *__begin; /// Body(Str); /// } /// </code> /// In this example, the loop user variable is `Str`, the loop iteration /// variable is `__begin` of type `std::vector<std::string>::iterator` and the /// logical iteration number type is `size_t` (unsigned version of /// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`). 
/// Therefore, the distance function will be /// <code> /// [&](size_t &Result) { Result = __end - __begin; } /// </code> /// and the loop variable function is /// <code> /// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) { /// Result = __begin + Logical; /// } /// </code> /// The variable `__begin`, aka the loop iteration variable, is captured by /// value because it is modified in the loop body, but both functions require /// the initial value. The OpenMP specification explicitly leaves unspecified /// when the loop expressions are evaluated such that a capture by reference is /// sufficient. class OMPCanonicalLoop : public Stmt { friend class ASTStmtReader; friend class ASTStmtWriter; /// Children of this AST node. enum { LOOP_STMT, DISTANCE_FUNC, LOOPVAR_FUNC, LOOPVAR_REF, LastSubStmt = LOOPVAR_REF }; private: /// This AST node's children. Stmt *SubStmts[LastSubStmt + 1] = {}; OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {} public: /// Create a new OMPCanonicalLoop. static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt, CapturedStmt *DistanceFunc, CapturedStmt *LoopVarFunc, DeclRefExpr *LoopVarRef) { OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop(); S->setLoopStmt(LoopStmt); S->setDistanceFunc(DistanceFunc); S->setLoopVarFunc(LoopVarFunc); S->setLoopVarRef(LoopVarRef); return S; } /// Create an empty OMPCanonicalLoop for deserialization. static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) { return new (Ctx) OMPCanonicalLoop(); } static bool classof(const Stmt *S) { return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass; } SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); } SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); } /// Return this AST node's children. 
/// @{ child_range children() { return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1); } /// @} /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt). /// @{ Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; } const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; } void setLoopStmt(Stmt *S) { assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) && "Canonical loop must be a for loop (range-based or otherwise)"); SubStmts[LOOP_STMT] = S; } /// @} /// The function that computes the number of loop iterations. Can be evaluated /// before entering the loop but after the syntactical loop's init /// statement(s). /// /// Function signature: void(LogicalTy &Result) /// Any values necessary to compute the distance are captures of the closure. /// @{ CapturedStmt *getDistanceFunc() { return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]); } const CapturedStmt *getDistanceFunc() const { return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]); } void setDistanceFunc(CapturedStmt *S) { assert(S && "Expected non-null captured statement"); SubStmts[DISTANCE_FUNC] = S; } /// @} /// The function that computes the loop user variable from a logical iteration /// counter. Can be evaluated as first statement in the loop. /// /// Function signature: void(LoopVarTy &Result, LogicalTy Number) /// Any other values required to compute the loop user variable (such as start /// value, step size) are captured by the closure. In particular, the initial /// value of loop iteration variable is captured by value to be unaffected by /// previous iterations. 
/// @{ CapturedStmt *getLoopVarFunc() { return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]); } const CapturedStmt *getLoopVarFunc() const { return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]); } void setLoopVarFunc(CapturedStmt *S) { assert(S && "Expected non-null captured statement"); SubStmts[LOOPVAR_FUNC] = S; } /// @} /// Reference to the loop user variable as accessed in the loop body. /// @{ DeclRefExpr *getLoopVarRef() { return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]); } const DeclRefExpr *getLoopVarRef() const { return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]); } void setLoopVarRef(DeclRefExpr *E) { assert(E && "Expected non-null loop variable"); SubStmts[LOOPVAR_REF] = E; } /// @} }; /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; friend class ASTStmtWriter; /// Kind of the directive. OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { if (!Data) return llvm::None; return Data->getClauses(); } protected: /// Data, associated with the directive. OMPChildren *Data = nullptr; /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)) {} template <typename T, typename... Params> static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, unsigned NumChildren, Params &&... 
P) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses, AssociatedStmt, NumChildren); auto *Inst = new (Mem) T(std::forward<Params>(P)...); Inst->Data = Data; return Inst; } template <typename T, typename... Params> static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses, bool HasAssociatedStmt, unsigned NumChildren, Params &&... P) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses, HasAssociatedStmt, NumChildren); auto *Inst = new (Mem) T(std::forward<Params>(P)...); Inst->Data = Data; return Inst; } template <typename T> static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses, bool HasAssociatedStmt = false, unsigned NumChildren = 0) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses, HasAssociatedStmt, NumChildren); auto *Inst = new (Mem) T; Inst->Data = Data; return Inst; } public: /// Iterates over expressions/statements used in the construct. 
class used_clauses_child_iterator : public llvm::iterator_adaptor_base< used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator, std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> { ArrayRef<OMPClause *>::iterator End; OMPClause::child_iterator ChildI, ChildEnd; void MoveToNext() { if (ChildI != ChildEnd) return; while (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); if (ChildI != ChildEnd) return; } } } public: explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses) : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); MoveToNext(); } } Stmt *operator*() const { return *ChildI; } Stmt *operator->() const { return **this; } used_clauses_child_iterator &operator++() { ++ChildI; if (ChildI != ChildEnd) return *this; if (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); } } MoveToNext(); return *this; } }; static llvm::iterator_range<used_clauses_child_iterator> used_clauses_children(ArrayRef<OMPClause *> Clauses) { return {used_clauses_child_iterator(Clauses), used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))}; } /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. 
template <typename SpecificClause> class specific_clause_iterator : public llvm::iterator_adaptor_base< specific_clause_iterator<SpecificClause>, ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag, const SpecificClause *, ptrdiff_t, const SpecificClause *, const SpecificClause *> { ArrayRef<OMPClause *>::const_iterator End; void SkipToNextClause() { while (this->I != End && !isa<SpecificClause>(*this->I)) ++this->I; } public: explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses) : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { SkipToNextClause(); } const SpecificClause *operator*() const { return cast<SpecificClause>(*this->I); } const SpecificClause *operator->() const { return **this; } specific_clause_iterator &operator++() { ++this->I; SkipToNextClause(); return *this; } }; template <typename SpecificClause> static llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind(ArrayRef<OMPClause *> Clauses) { return {specific_clause_iterator<SpecificClause>(Clauses), specific_clause_iterator<SpecificClause>( llvm::makeArrayRef(Clauses.end(), 0))}; } template <typename SpecificClause> llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind() const { return getClausesOfKind<SpecificClause>(clauses()); } /// Gets a single clause of the specified kind associated with the /// current directive iff there is only one clause of this kind (and assertion /// is fired if there is more than one clause is associated with the /// directive). Returns nullptr if no clause of this kind is associated with /// the directive. 
template <typename SpecificClause> static const SpecificClause *getSingleClause(ArrayRef<OMPClause *> Clauses) { auto ClausesOfKind = getClausesOfKind<SpecificClause>(Clauses); if (ClausesOfKind.begin() != ClausesOfKind.end()) { assert(std::next(ClausesOfKind.begin()) == ClausesOfKind.end() && "There are at least 2 clauses of the specified kind"); return *ClausesOfKind.begin(); } return nullptr; } template <typename SpecificClause> const SpecificClause *getSingleClause() const { return getSingleClause<SpecificClause>(clauses()); } /// Returns true if the current directive has one or more clauses of a /// specific kind. template <typename SpecificClause> bool hasClausesOfKind() const { auto Clauses = getClausesOfKind<SpecificClause>(); return Clauses.begin() != Clauses.end(); } /// Returns starting location of directive kind. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns ending location of directive. SourceLocation getEndLoc() const { return EndLoc; } /// Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Get number of clauses. unsigned getNumClauses() const { if (!Data) return 0; return Data->getNumClauses(); } /// Returns specified clause. /// /// \param I Number of clause. /// OMPClause *getClause(unsigned I) const { return clauses()[I]; } /// Returns true if directive has associated statement. bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); } /// Returns statement associated with the directive. 
const Stmt *getAssociatedStmt() const {
  // Forward to the non-const overload; the cast is safe since the result is
  // re-qualified as const on return.
  return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
}
Stmt *getAssociatedStmt() {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  return Data->getAssociatedStmt();
}

/// Returns the captured statement associated with the
/// component region within the (combined) directive.
///
/// \param RegionKind Component region kind.
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  // The set of capture regions depends on the (possibly combined) directive
  // kind; fetch it before selecting the requested component region.
  getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
  return Data->getCapturedStmt(RegionKind, CaptureRegions);
}

/// Get innermost captured statement for the construct.
CapturedStmt *getInnermostCapturedStmt() {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
  return Data->getInnermostCapturedStmt(CaptureRegions);
}

const CapturedStmt *getInnermostCapturedStmt() const {
  return const_cast<OMPExecutableDirective *>(this)
      ->getInnermostCapturedStmt();
}

/// Returns the kind of OpenMP directive this node represents.
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

static bool classof(const Stmt *S) {
  // All OMP executable directive classes occupy a contiguous range of
  // statement-class IDs.
  return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
         S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}

child_range children() {
  if (!Data)
    return child_range(child_iterator(), child_iterator());
  return Data->getAssociatedStmtAsRange();
}

const_child_range children() const {
  return const_cast<OMPExecutableDirective *>(this)->children();
}

/// Returns the clause list, or an empty ArrayRef when there is no trailing
/// data.
ArrayRef<OMPClause *> clauses() const {
  if (!Data)
    return llvm::None;
  return Data->getClauses();
}

/// Returns whether or not this is a Standalone directive.
///
/// Stand-alone directives are executable directives
/// that have no associated user code.
bool isStandaloneDirective() const;

/// Returns the AST node representing OpenMP structured-block of this
/// OpenMP executable directive,
/// Prerequisite: Executable Directive must not be Standalone directive.
const Stmt *getStructuredBlock() const {
  return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
}
Stmt *getStructuredBlock();

/// Returns the raw (un-captured) statement associated with the directive.
const Stmt *getRawStmt() const {
  return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
}
Stmt *getRawStmt() {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  return Data->getRawStmt();
}
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelDirective()
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored as the first child expression of the directive.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};

/// The base class for all loop-based directives, including loop transformation
/// directives.
class OMPLoopBasedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

protected:
  /// Number of collapsed loops as specified by 'collapse' clause.
  unsigned NumAssociatedLoops = 0;

  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param NumAssociatedLoops Number of loops associated with the construct.
  ///
  OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                        SourceLocation StartLoc, SourceLocation EndLoc,
                        unsigned NumAssociatedLoops)
      : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
        NumAssociatedLoops(NumAssociatedLoops) {}

public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g. pragma omp distribute parallel for)
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    ///  with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    ///  with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct when schedule is chunked.
    Expr *DistCond;
    /// 'omp parallel for' loop condition used when composed with
    /// 'omp distribute' in the same construct and when schedule is
    /// chunked and the chunk size is 1.
    Expr *ParForInDistCond;
  };

  /// The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// Loop iteration variable.
    Expr *IterationVarRef;
    /// Loop last iteration number.
    Expr *LastIteration;
    /// Loop number of iterations.
    Expr *NumIterations;
    /// Calculation of last iteration.
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// List of counters required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentCounters;
    /// List of initializers required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentInits;
    /// List of final conditions required for the generation of the
    /// non-rectangular loops.
    SmallVector<Expr *, 4> FinalsConditions;
    /// Init statement for all captured expressions.
    Stmt *PreInits;

    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;

    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// Initialize all the fields to null.
    /// \param Size Number of elements in the
    /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
    /// arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      // Per-loop arrays are resized to the collapsed-loop count and nulled.
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      DependentCounters.resize(Size);
      DependentInits.resize(Size);
      FinalsConditions.resize(Size);
      for (unsigned I = 0; I < Size; ++I) {
        Counters[I] = nullptr;
        PrivateCounters[I] = nullptr;
        Inits[I] = nullptr;
        Updates[I] = nullptr;
        Finals[I] = nullptr;
        DependentCounters[I] = nullptr;
        DependentInits[I] = nullptr;
        FinalsConditions[I] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };

  /// Get number of collapsed loops.
  unsigned getLoopsNumber() const { return NumAssociatedLoops; }

  /// Try to find the next loop sub-statement in the specified statement \p
  /// CurStmt.
  /// \param TryImperfectlyNestedLoops true, if we need to try to look for the
  /// imperfectly nested loop.
  static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
                                      bool TryImperfectlyNestedLoops);
  static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
                                            bool TryImperfectlyNestedLoops) {
    return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
                                  TryImperfectlyNestedLoops);
  }

  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
  static bool doForAllLoops(
      Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<bool(unsigned, Stmt *)> Callback,
      llvm::function_ref<void(OMPLoopTransformationDirective *)>
          OnTransformationCallback);
  static bool doForAllLoops(
      const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
      llvm::function_ref<void(const OMPLoopTransformationDirective *)>
          OnTransformationCallback) {
    // Adapt the const callbacks to the non-const entry point; the wrappers
    // never actually mutate the statements.
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    auto &&NewTransformCb =
        [OnTransformationCallback](OMPLoopTransformationDirective *A) {
          OnTransformationCallback(A);
        };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                         NumLoops, NewCallback, NewTransformCb);
  }

  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
  static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                            unsigned NumLoops,
                            llvm::function_ref<bool(unsigned, Stmt *)>
                                Callback) {
    // Overload without a transformation callback: supply a no-op one.
    auto &&TransformCb = [](OMPLoopTransformationDirective *) {};
    return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
                         TransformCb);
  }
  static bool
  doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, const Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                         NumLoops, NewCallback);
  }

  /// Calls the specified callback function for all the loop bodies in \p
  /// CurStmt, from the outermost loop to the innermost.
  static void doForAllLoopsBodies(
      Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
  static void doForAllLoopsBodies(
      const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)>
          Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
      Callback(Cnt, Loop, Body);
    };
    doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                        NumLoops, NewCallback);
  }

  static bool classof(const Stmt *T) {
    if (auto *D = dyn_cast<OMPExecutableDirective>(T))
      return isOpenMPLoopDirective(D->getDirectiveKind());
    return false;
  }
};

/// The base class for all loop transformation directives.
class OMPLoopTransformationDirective : public OMPLoopBasedDirective {
  friend class ASTStmtReader;

  /// Number of loops generated by this loop transformation.
unsigned NumGeneratedLoops = 0; protected: explicit OMPLoopTransformationDirective(StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumAssociatedLoops) : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops) {} /// Set the number of loops generated by this loop transformation. void setNumGeneratedLoops(unsigned Num) { NumGeneratedLoops = Num; } public: /// Return the number of associated (consumed) loops. unsigned getNumAssociatedLoops() const { return getLoopsNumber(); } /// Return the number of loops generated by this loop transformation. unsigned getNumGeneratedLoops() { return NumGeneratedLoops; } /// Get the de-sugared statements after after the loop transformation. /// /// Might be nullptr if either the directive generates no loops and is handled /// directly in CodeGen, or resolving a template-dependence context is /// required. Stmt *getTransformedStmt() const; /// Return preinits statement. Stmt *getPreInits() const; static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTileDirectiveClass || T->getStmtClass() == OMPUnrollDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPLoopBasedDirective { friend class ASTStmtReader; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 11 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, three arrays of length NumAssociatedLoops are /// allocated: loop counters, their updates and final values. 
/// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { IterationVariableOffset = 0, LastIterationOffset = 1, CalcLastIterationOffset = 2, PreConditionOffset = 3, CondOffset = 4, InitOffset = 5, IncOffset = 6, PreInitsOffset = 7, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 8, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 8, LowerBoundVariableOffset = 9, UpperBoundVariableOffset = 10, StrideVariableOffset = 11, EnsureUpperBoundOffset = 12, NextLowerBoundOffset = 13, NextUpperBoundOffset = 14, NumIterationsOffset = 15, // Offset to the end for worksharing loop directives. WorksharingEnd = 16, PrevLowerBoundVariableOffset = 16, PrevUpperBoundVariableOffset = 17, DistIncOffset = 18, PrevEnsureUpperBoundOffset = 19, CombinedLowerBoundVariableOffset = 20, CombinedUpperBoundVariableOffset = 21, CombinedEnsureUpperBoundOffset = 22, CombinedInitOffset = 23, CombinedConditionOffset = 24, CombinedNextLowerBoundOffset = 25, CombinedNextUpperBoundOffset = 26, CombinedDistConditionOffset = 27, CombinedParForInDistConditionOffset = 28, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. CombinedDistributeEnd = 29, }; /// Get the counters storage. 
  // The eight per-loop expression arrays (counters, private counters, inits,
  // updates, finals, dependent counters, dependent inits, finals conditions)
  // are laid out back-to-back after the fixed children; each accessor below
  // selects its array by adding K * getLoopsNumber() to the base offset.
  MutableArrayRef<Expr *> getCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind())]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber())
;
  }

  /// Get the inits storage.
  MutableArrayRef<Expr *> getInits() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             2 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             3 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             4 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the dependent counters storage.
  MutableArrayRef<Expr *> getDependentCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             5 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the dependent inits storage.
  MutableArrayRef<Expr *> getDependentInits() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             6 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

  /// Get the finals conditions storage.
  MutableArrayRef<Expr *> getFinalsConditions() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             7 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

protected:
  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
  ///
  OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                   SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum)
      : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}

  /// Offset to the start of children expression arrays.
  static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
    if (isOpenMPLoopBoundSharingDirective(Kind))
      return CombinedDistributeEnd;
    if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
        isOpenMPGenericLoopDirective(Kind) || isOpenMPDistributeDirective(Kind))
      return WorksharingEnd;
    return DefaultEnd;
  }

  /// Children number.
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) +
           8 * CollapsedNum; // Counters, PrivateCounters, Inits,
                             // Updates, Finals, DependentCounters,
                             // DependentInits, FinalsConditions.
  }

  // Setters below store into the fixed child slots defined by the offsets
  // enum; worksharing-only slots are guarded by asserts on the directive kind.
  void setIterationVariable(Expr *IV) {
    Data->getChildren()[IterationVariableOffset] = IV;
  }
  void setLastIteration(Expr *LI) {
    Data->getChildren()[LastIterationOffset] = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    Data->getChildren()[CalcLastIterationOffset] = CLI;
  }
  void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
  void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
  void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
  void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[IsLastIterVariableOffset] = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[LowerBoundVariableOffset] = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[UpperBoundVariableOffset] = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[StrideVariableOffset] = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[EnsureUpperBoundOffset] = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NextLowerBoundOffset] = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NextUpperBoundOffset] = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPGenericLoopDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NumIterationsOffset] = NI;
  }
  // The following setters target slots that only exist for loop-bound-sharing
  // (combined distribute) directives.
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[DistIncOffset] = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedInitOffset] = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedConditionOffset] = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
  }
  void setCombinedDistCond(Expr *CombDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
  }
void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); Data->getChildren()[CombinedParForInDistConditionOffset] = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); void setFinals(ArrayRef<Expr *> A); void setDependentCounters(ArrayRef<Expr *> A); void setDependentInits(ArrayRef<Expr *> A); void setFinalsConditions(ArrayRef<Expr *> A); public: Expr *getIterationVariable() const { return cast<Expr>(Data->getChildren()[IterationVariableOffset]); } Expr *getLastIteration() const { return cast<Expr>(Data->getChildren()[LastIterationOffset]); } Expr *getCalcLastIteration() const { return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]); } Expr *getPreCond() const { return cast<Expr>(Data->getChildren()[PreConditionOffset]); } Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); } Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); } Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); } const Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected 
worksharing loop directive"); return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[StrideVariableOffset]); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || 
isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NumIterationsOffset]); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[DistIncOffset]); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedInitOffset]); } Expr 
*getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
  }
  Expr *getCombinedDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
  }
  Expr *getCombinedParForInDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
  }

  /// Returns the loop-nest body; defined out of line. The const overload
  /// forwards to the non-const implementation.
  Stmt *getBody();
  const Stmt *getBody() const {
    return const_cast<OMPLoopDirective *>(this)->getBody();
  }

  // Array views over the per-collapsed-loop helper expressions. Each const
  // overload casts away constness to reuse the single protected getter.
  ArrayRef<Expr *> counters() { return getCounters(); }
  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }
  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }
  ArrayRef<Expr *> inits() { return getInits(); }
  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }
  ArrayRef<Expr *> updates() { return getUpdates(); }
  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }
  ArrayRef<Expr *> finals() { return getFinals(); }
  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }
  ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
  ArrayRef<Expr *> dependent_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
  }
  ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
  ArrayRef<Expr *> dependent_inits() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentInits();
  }
  ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
  ArrayRef<Expr *> finals_conditions() const {
    return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
  }

  // LLVM-style RTTI: true for every loop-based OpenMP directive class.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() ==
               OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum)
      : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C,
                                       unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};

/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum)
      : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  // Stored in the child slot immediately after the loop children.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(getLoopsNumber(),
                                        llvm::omp::OMPD_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt, const HelperExprs &Exprs,
                                 Expr *TaskRedRef, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);

  /// Returns special task reduction reference expression.
  // May be null (cast_or_null) when no task reduction clause is present.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};

/// This represents '#pragma omp for simd' directive.
/// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSectionsDirective() : OMPExecutableDirective(OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner directive. 
/// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSectionDirectiveClass, llvm::omp::OMPD_section, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(OMPSectionDirectiveClass, llvm::omp::OMPD_section, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. /// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSingleDirective() : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. 
/// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc), DirName(Name) {} /// Build an empty directive. /// explicit OMPCriticalDirective() : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation()) {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. 
/// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getLoopsNumber(), llvm::omp::OMPD_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. 
/// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. 
/// /// \code /// #pragma omp parallel master private(a,b) /// \endcode /// In this example directive '#pragma omp parallel master' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPParallelMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, StartLoc, EndLoc) {} explicit OMPParallelMasterDirective() : OMPExecutableDirective(OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// static OMPParallelMasterDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelMasterDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelMasterDirective *>(this) ->getTaskReductionRefExpr(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPParallelSectionsDirective() : OMPExecutableDirective(OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelSectionsDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if this directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task, StartLoc, EndLoc) {} /// Build an empty directive. 
/// explicit OMPTaskDirective() : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task, SourceLocation(), SourceLocation()) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {} /// Build an empty directive. 
/// explicit OMPTaskyieldDirective() : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPTaskwaitDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskgroupDirective() : OMPExecutableDirective(OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, SourceLocation(), SourceLocation()) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef(); } Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. 
class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPFlushDirective() : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp depobj' directive. /// /// \code /// #pragma omp depobj(a) depend(in:x,y) /// \endcode /// In this example directive '#pragma omp depobj' initializes a depobj object /// 'a' with dependence type 'in' and a list with 'x' and 'y' locators. class OMPDepobjDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPDepobjDirective() : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPDepobjDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPDepobjDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDepobjDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPOrderedDirective() : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param IsStandalone true, if the standalone directive is created.
/// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPAtomicDirective() : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation()) {} enum DataPositionTy : size_t { POS_X = 0, POS_V, POS_E, POS_UpdateExpr, POS_D, POS_Cond, }; /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { Data->getChildren()[DataPositionTy::POS_X] = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { Data->getChildren()[DataPositionTy::POS_UpdateExpr] = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; } /// Set 'd' part of the associated expression/statement. void setD(Expr *D) { Data->getChildren()[DataPositionTy::POS_D] = D; } /// Set conditional expression in `atomic compare`. void setCond(Expr *C) { Data->getChildren()[DataPositionTy::POS_Cond] = C; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. 
/// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param D 'd' part of the associated expression/statement. /// \param Cond Conditional expression in `atomic compare` construct. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, Expr *D, Expr *Cond, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]); } const Expr *getX() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>( Data->getChildren()[DataPositionTy::POS_UpdateExpr]); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>( Data->getChildren()[DataPositionTy::POS_UpdateExpr]); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. 
bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]); } const Expr *getV() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]); } /// Get 'expr' part of the associated expression/statement. Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]); } const Expr *getExpr() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]); } /// Get 'd' part of the associated expression/statement. Expr *getD() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]); } Expr *getD() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]); } /// Get the 'cond' part of the source atomic expression. Expr *getCondExpr() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]); } Expr *getCondExpr() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, StartLoc, EndLoc) {} /// Build an empty directive. 
/// explicit OMPTargetDirective() : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. /// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDataDirective() : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. /// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetEnterDataDirective() : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. /// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetExitDataDirective() : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. /// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetParallelDirective() : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. /// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTeamsDirective() : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// statements and child expressions. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, StartLoc, EndLoc) {} /// Build an empty directive. explicit OMPCancellationPointDirective() : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPCancelDirective() : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. 
/// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. /// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. 
/// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. 
bool hasCancel() const { return HasCancel; }

// LLVM-style RTTI hook used by isa<>/dyn_cast<>.
static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
}
};

/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive (used when deserializing from the AST reader).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
/// static OMPMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \p NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop' directive. /// /// \code /// #pragma omp parallel master taskloop private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop simd' directive. /// /// \code /// #pragma omp parallel master taskloop simd private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop simd' has /// clauses 'private' with the variables 'a' and 'b', 'grainsize' with /// expression 'val' and 'num_tasks' with expression 'num'. 
/// class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPParallelMasterTaskLoopSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPDistributeDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeDirectiveClass, llvm::omp::OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetUpdateDirective() : OMPExecutableDirective(OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. 
// The descriptor is stored in the child slot immediately after the
// loop-related children.
void setTaskReductionRefExpr(Expr *E) {
  Data->getChildren()[numLoopChildren(
      getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
}

/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }

public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
       unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
       Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
       bool HasCancel);

/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
  return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
      getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
}
// Const overload; delegates to the non-const accessor above.
const Expr *getTaskReductionRefExpr() const {
  return const_cast<OMPDistributeParallelForDirective *>(this)
      ->getTaskReductionRefExpr();
}

/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }

// LLVM-style RTTI hook (enables isa<>/dyn_cast<> on Stmt).
static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
}
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive (invalid source locations; filled in by the
  /// AST deserializer).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
/// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. /// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. 
/// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() {
  return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
      getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
}
/// Const overload; delegates to the non-const accessor above.
const Expr *getTaskReductionRefExpr() const {
  return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
      ->getTaskReductionRefExpr();
}

/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }

// LLVM-style RTTI hook (enables isa<>/dyn_cast<> on Stmt).
static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
}
};

/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc,
                               EndLoc) {}

  /// Build an empty directive (invalid source locations; filled in by the
  /// AST deserializer).
  ///
  explicit OMPTargetTeamsDirective()
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
/// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() {
  return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
      getLoopsNumber(),
      llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
}
/// Const overload; delegates to the non-const accessor above.
const Expr *getTaskReductionRefExpr() const {
  return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this)
      ->getTaskReductionRefExpr();
}

/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }

// LLVM-style RTTI hook (enables isa<>/dyn_cast<> on Stmt).
static bool classof(const Stmt *T) {
  return T->getStmtClass() ==
         OMPTargetTeamsDistributeParallelForDirectiveClass;
}
};

/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive (invalid source locations; filled in by the
  /// AST deserializer).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents the '#pragma omp tile' loop transformation directive. class OMPTileDirective final : public OMPLoopTransformationDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Default list of offsets. 
/// Offsets of the extra child statements stored in Data->getChildren().
enum {
  PreInitsOffset = 0,
  TransformedStmtOffset,
};

/// Build a '#pragma omp tile' directive for \p NumLoops associated loops.
/// Records 3 * NumLoops generated loops via setNumGeneratedLoops()
/// (NOTE(review): factor of 3 matches the constructor code; confirm the
/// per-loop decomposition against the tiling CodeGen before relying on it).
explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned NumLoops)
    : OMPLoopTransformationDirective(OMPTileDirectiveClass,
                                     llvm::omp::OMPD_tile, StartLoc, EndLoc,
                                     NumLoops) {
  setNumGeneratedLoops(3 * NumLoops);
}

/// Store the helper pre-init statements as a child.
void setPreInits(Stmt *PreInits) {
  Data->getChildren()[PreInitsOffset] = PreInits;
}

/// Store the de-sugared (tiled) loop nest as a child.
void setTransformedStmt(Stmt *S) {
  Data->getChildren()[TransformedStmtOffset] = S;
}

public:
/// Create a new AST node representation for '#pragma omp tile'.
///
/// \param C Context of the AST.
/// \param StartLoc Location of the introducer (e.g. the 'omp' token).
/// \param EndLoc Location of the directive's end (e.g. the tok::eod).
/// \param Clauses The directive's clauses.
/// \param NumLoops Number of associated loops (number of items in the
/// 'sizes' clause).
/// \param AssociatedStmt The outermost associated loop.
/// \param TransformedStmt The loop nest after tiling, or nullptr in
/// dependent contexts.
/// \param PreInits Helper preinits statements for the loop nest.
static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation EndLoc,
                                ArrayRef<OMPClause *> Clauses,
                                unsigned NumLoops, Stmt *AssociatedStmt,
                                Stmt *TransformedStmt, Stmt *PreInits);

/// Build an empty '#pragma omp tile' AST node for deserialization.
///
/// \param C Context of the AST.
/// \param NumClauses Number of clauses to allocate.
/// \param NumLoops Number of associated loops to allocate.
static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                     unsigned NumLoops);

/// Gets/sets the associated loops after tiling.
///
/// This is in de-sugared format stored as a CompoundStmt.
///
/// \code
/// for (...)
///   ...
/// \endcode
///
/// Note that if the generated loops become associated loops of another
/// directive, they may need to be hoisted before them.
Stmt *getTransformedStmt() const { return Data->getChildren()[TransformedStmtOffset]; } /// Return preinits statement. Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTileDirectiveClass; } }; /// This represents the '#pragma omp unroll' loop transformation directive. /// /// \code /// #pragma omp unroll /// for (int i = 0; i < 64; ++i) /// \endcode class OMPUnrollDirective final : public OMPLoopTransformationDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Default list of offsets. enum { PreInitsOffset = 0, TransformedStmtOffset, }; explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPLoopTransformationDirective(OMPUnrollDirectiveClass, llvm::omp::OMPD_unroll, StartLoc, EndLoc, 1) {} /// Set the pre-init statements. void setPreInits(Stmt *PreInits) { Data->getChildren()[PreInitsOffset] = PreInits; } /// Set the de-sugared statement. void setTransformedStmt(Stmt *S) { Data->getChildren()[TransformedStmtOffset] = S; } public: /// Create a new AST node representation for '#pragma omp unroll'. /// /// \param C Context of the AST. /// \param StartLoc Location of the introducer (e.g. the 'omp' token). /// \param EndLoc Location of the directive's end (e.g. the tok::eod). /// \param Clauses The directive's clauses. /// \param AssociatedStmt The outermost associated loop. /// \param TransformedStmt The loop nest after tiling, or nullptr in /// dependent contexts. /// \param PreInits Helper preinits statements for the loop nest. static OMPUnrollDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, unsigned NumGeneratedLoops, Stmt *TransformedStmt, Stmt *PreInits); /// Build an empty '#pragma omp unroll' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumClauses Number of clauses to allocate. 
static OMPUnrollDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses); /// Get the de-sugared associated loops after unrolling. /// /// This is only used if the unrolled loop becomes an associated loop of /// another directive, otherwise the loop is emitted directly using loop /// transformation metadata. When the unrolled loop cannot be used by another /// directive (e.g. because of the full clause), the transformed stmt can also /// be nullptr. Stmt *getTransformedStmt() const { return Data->getChildren()[TransformedStmtOffset]; } /// Return the pre-init statements. Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPUnrollDirectiveClass; } }; /// This represents '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' with /// list item 'a'. class OMPScanDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPScanDirective() : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). 
/// static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPScanDirectiveClass; } }; /// This represents '#pragma omp interop' directive. /// /// \code /// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait /// \endcode /// In this example directive '#pragma omp interop' has /// clauses 'init', 'device', 'depend' and 'nowait'. /// class OMPInteropDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive. /// \param EndLoc Ending location of the directive. /// OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPInteropDirectiveClass, llvm::omp::OMPD_interop, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPInteropDirective() : OMPExecutableDirective(OMPInteropDirectiveClass, llvm::omp::OMPD_interop, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive. /// \param EndLoc Ending Location of the directive. /// \param Clauses The directive's clauses. /// static OMPInteropDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPInteropDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPInteropDirectiveClass; } }; /// This represents '#pragma omp dispatch' directive. /// /// \code /// #pragma omp dispatch device(dnum) /// \endcode /// This example shows a directive '#pragma omp dispatch' with a /// device clause with variable 'dnum'. /// class OMPDispatchDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// The location of the target-call. SourceLocation TargetCallLoc; /// Set the location of the target-call. void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; } /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPDispatchDirectiveClass, llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPDispatchDirective() : OMPExecutableDirective(OMPDispatchDirectiveClass, llvm::omp::OMPD_dispatch, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TargetCallLoc Location of the target-call. /// static OMPDispatchDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, SourceLocation TargetCallLoc); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. 
/// static OMPDispatchDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return location of target-call. SourceLocation getTargetCallLoc() const { return TargetCallLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDispatchDirectiveClass; } }; /// This represents '#pragma omp masked' directive. /// \code /// #pragma omp masked filter(tid) /// \endcode /// This example shows a directive '#pragma omp masked' with a filter clause /// with variable 'tid'. /// class OMPMaskedDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMaskedDirective() : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMaskedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMaskedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMaskedDirectiveClass; } }; /// This represents '#pragma omp metadirective' directive. 
/// /// \code /// #pragma omp metadirective when(user={condition(N>10)}: parallel for) /// \endcode /// In this example directive '#pragma omp metadirective' has clauses 'when' /// with a dynamic user condition to check if a variable 'N > 10' /// class OMPMetaDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; Stmt *IfStmt; OMPMetaDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMetaDirectiveClass, llvm::omp::OMPD_metadirective, StartLoc, EndLoc) {} explicit OMPMetaDirective() : OMPExecutableDirective(OMPMetaDirectiveClass, llvm::omp::OMPD_metadirective, SourceLocation(), SourceLocation()) {} void setIfStmt(Stmt *S) { IfStmt = S; } public: static OMPMetaDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Stmt *IfStmt); static OMPMetaDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); Stmt *getIfStmt() const { return IfStmt; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMetaDirectiveClass; } }; /// This represents '#pragma omp loop' directive. /// /// \code /// #pragma omp loop private(a,b) binding(parallel) order(concurrent) /// \endcode /// In this example directive '#pragma omp loop' has /// clauses 'private' with the variables 'a' and 'b', 'binding' with /// modifier 'parallel' and 'order(concurrent). /// class OMPGenericLoopDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPGenericLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPGenericLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with a place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// \param CollapsedNum Number of collapsed nested loops. /// static OMPGenericLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPGenericLoopDirectiveClass; } }; /// This represents '#pragma omp teams loop' directive. /// /// \code /// #pragma omp teams loop private(a,b) order(concurrent) /// \endcode /// In this example directive '#pragma omp teams loop' has /// clauses 'private' with the variables 'a' and 'b', and order(concurrent). 
/// class OMPTeamsGenericLoopDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsGenericLoopDirectiveClass, llvm::omp::OMPD_teams_loop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTeamsGenericLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsGenericLoopDirectiveClass, llvm::omp::OMPD_teams_loop, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsGenericLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTeamsGenericLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass; } }; /// This represents '#pragma omp target teams loop' directive. /// /// \code /// #pragma omp target teams loop private(a,b) order(concurrent) /// \endcode /// In this example directive '#pragma omp target teams loop' has /// clauses 'private' with the variables 'a' and 'b', and order(concurrent). /// class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsGenericLoopDirectiveClass, llvm::omp::OMPD_target_teams_loop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsGenericLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsGenericLoopDirectiveClass, llvm::omp::OMPD_target_teams_loop, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTargetTeamsGenericLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsGenericLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass; } }; /// This represents '#pragma omp parallel loop' directive. /// /// \code /// #pragma omp parallel loop private(a,b) order(concurrent) /// \endcode /// In this example directive '#pragma omp parallel loop' has /// clauses 'private' with the variables 'a' and 'b', and order(concurrent). /// class OMPParallelGenericLoopDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass, llvm::omp::OMPD_parallel_loop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelGenericLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass, llvm::omp::OMPD_parallel_loop, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelGenericLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelGenericLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelGenericLoopDirectiveClass; } }; /// This represents '#pragma omp target parallel loop' directive. /// /// \code /// #pragma omp target parallel loop private(a,b) order(concurrent) /// \endcode /// In this example directive '#pragma omp target parallel loop' has /// clauses 'private' with the variables 'a' and 'b', and order(concurrent). /// class OMPTargetParallelGenericLoopDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetParallelGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass, llvm::omp::OMPD_target_parallel_loop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. 
/// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelGenericLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass, llvm::omp::OMPD_target_parallel_loop, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelGenericLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelGenericLoopDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass; } }; } // end namespace clang #endif
test68.c
#include<omp.h> int main() { //volatile int arr[1000][1000][2]; //arr[0][0] = 0; volatile int sh; #pragma omp parallel { long long i, j, k; //arr[0][0][0] = 0; for(i = 0; i < 5000; i++) for(j = 0; j < 100000; j++) { #pragma omp flush } } }
comm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication (abstract interface implemented by
 * the concrete Comm subclasses below)
 */
class Comm {
 public:
  /// Uses CPU-pinned memory (CPUPinned(0)) as the staging context.
  Comm() {
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + ..
   + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param key the identifier key for the stored ndarray
   * \param src the source row_sparse ndarray to broadcast
   * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
   *        where the row_ids are expected to be unique and sorted in row_id.data()
   * \param priority the priority of the operation
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   * perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }

 protected:
  // Staging context (CPU-pinned memory), set in the constructor.
  Context pinned_ctx_;
  // Shared gradient-compression configuration; may be unset.
  std::shared_ptr<GradientCompression> gc_;
};

/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
 public:
  /// Reads tuning knobs from the environment; see variable names below.
  CommCPU() {
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() { }

  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int type = mshadow::kFloat32) override {
    // Delayed allocation - the dense merged buffer might not be used at all if push()
    // only sees sparse arrays
    bool delay_alloc = true;
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
  }

  const NDArray& Reduce(int key, const
std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. 
// This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (size_t i = 0; i < dst.size(); ++i) { NDArray* out = dst[i].first; NDArray row_id = dst[i].second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid the of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. 
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], 
in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray 
merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = 
merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } } else { // sparse reduce if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { 
if (!inited_) { // copy to a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (size_t i = 0; i < dst.size(); ++i) { NDArray* out = dst[i].first; NDArray row_id = dst[i].second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_CUDA case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_CUDA std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); for (int i = 0; i < n; ++i) { cudaSetDevice(gpus[i]); for (int j = 0; j < n; j++) { int access; cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0); if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } using KeyAttrs = std::tuple<int, TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) { const int key = std::get<0>(sorted_key_attrs_[i]); const TShape& shape = std::get<1>(sorted_key_attrs_[i]); const int type = std::get<2>(sorted_key_attrs_[i]); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) { size_t size = it->second.second; if (size <= min_size) { ctx = it->second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } std::vector<KeyAttrs> sorted_key_attrs_; /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { 
CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; bool inited_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
/* ===== file: GB_ewise_slice.c (concatenation marker; original file boundary) ===== */
//------------------------------------------------------------------------------
// GB_ewise_slice: slice the entries and vectors for an ewise operation
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Constructs a set of tasks to compute C, for an element-wise operation
// (GB_add, GB_emult, and GB_mask) that operates on two input matrices,
// C=op(A,B).  The mask is ignored for computing where to slice the work, but
// it is sliced once the location has been found.

// Free only the per-call workspace (coarse slice boundaries and per-vector
// work estimates).  The TaskList is the function's output and is not freed
// here.
#define GB_FREE_WORK                                                        \
{                                                                           \
    GB_FREE_MEMORY (Coarse, ntasks1+1, sizeof (int64_t)) ;                  \
    GB_FREE_MEMORY (Cwork, Cnvec+1, sizeof (int64_t)) ;                     \
}

// Free everything, including the (partially built) TaskList; used on the
// out-of-memory error paths.
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_FREE_WORK ;                                                          \
    GB_FREE_MEMORY (TaskList, max_ntasks+1, sizeof (GB_task_struct)) ;      \
}

#include "GB.h"

//------------------------------------------------------------------------------
// GB_ewise_slice
//------------------------------------------------------------------------------

GrB_Info GB_ewise_slice
(
    // output:
    GB_task_struct **p_TaskList,    // array of structs, of size max_ntasks
    int *p_max_ntasks,              // size of TaskList
    int *p_ntasks,                  // # of tasks constructed
    int *p_nthreads,                // # of threads for eWise operation
    // input:
    const int64_t Cnvec,            // # of vectors of C
    const int64_t *GB_RESTRICT Ch,      // vectors of C, if hypersparse
    const int64_t *GB_RESTRICT C_to_M,  // mapping of C to M
    const int64_t *GB_RESTRICT C_to_A,  // mapping of C to A
    const int64_t *GB_RESTRICT C_to_B,  // mapping of C to B
    bool Ch_is_Mh,                  // if true, then Ch == Mh; GB_add only
    const GrB_Matrix M,             // mask matrix to slice (optional)
    const GrB_Matrix A,             // matrix to slice
    const GrB_Matrix B,             // matrix to slice
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (p_TaskList != NULL) ;
    ASSERT (p_max_ntasks != NULL) ;
    ASSERT (p_ntasks != NULL) ;
    ASSERT (p_nthreads != NULL) ;
    ASSERT_MATRIX_OK (A, "A for ewise_slice", GB0) ;
    ASSERT_MATRIX_OK (B, "B for ewise_slice", GB0) ;

    // initialize the outputs so the error paths leave them in a defined state
    (*p_TaskList  ) = NULL ;
    (*p_max_ntasks) = 0 ;
    (*p_ntasks    ) = 0 ;
    (*p_nthreads  ) = 1 ;

    int64_t *GB_RESTRICT Cwork = NULL ;
    int64_t *GB_RESTRICT Coarse = NULL ;    // size ntasks1+1
    int ntasks1 = 0 ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // allocate the initial TaskList
    //--------------------------------------------------------------------------

    // Allocate the TaskList to hold at least 2*ntask0 tasks.  It will grow
    // later, if needed.  Usually, 64*nthreads_max is enough, but in a few cases
    // fine tasks can cause this number to be exceeded.  If that occurs,
    // TaskList is reallocated.

    // When the mask is present, it is often fastest to break the work up
    // into tasks, even when nthreads_max is 1.

    GB_task_struct *GB_RESTRICT TaskList = NULL ;
    int max_ntasks = 0 ;
    int ntasks0 = (M == NULL && nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
    GB_REALLOC_TASK_LIST (TaskList, ntasks0, max_ntasks) ;

    //--------------------------------------------------------------------------
    // check for quick return for a single task
    //--------------------------------------------------------------------------

    if (Cnvec == 0 || ntasks0 == 1)
    {
        // construct a single coarse task that computes all of C
        TaskList [0].kfirst = 0 ;
        TaskList [0].klast  = Cnvec-1 ;
        (*p_TaskList  ) = TaskList ;
        (*p_max_ntasks) = max_ntasks ;
        (*p_ntasks    ) = (Cnvec == 0) ? 0 : 1 ;
        (*p_nthreads  ) = 1 ;
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // get A, B, and M
    //--------------------------------------------------------------------------

    const int64_t vlen = A->vlen ;
    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bi = B->i ;
    // pointer equality is intentional here: detect when Ch aliases A->h / B->h
    bool Ch_is_Ah = (Ch != NULL && A->h != NULL && Ch == A->h) ;
    bool Ch_is_Bh = (Ch != NULL && B->h != NULL && Ch == B->h) ;

    const int64_t *GB_RESTRICT Mp = NULL ;
    const int64_t *GB_RESTRICT Mi = NULL ;
    if (M != NULL)
    {
        Mp = M->p ;
        Mi = M->i ;
        // Ch_is_Mh is true if either true on input (for GB_add, which denotes
        // that Ch is a deep copy of M->h), or if Ch is a shallow copy of M->h.
        Ch_is_Mh = Ch_is_Mh || (Ch != NULL && M->h != NULL && Ch == M->h) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (Cwork, Cnvec+1, sizeof (int64_t)) ;
    if (Cwork == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compute an estimate of the work for each vector of C
    //--------------------------------------------------------------------------

    // Cwork [k] is an upper bound on the work for vector k: nnz in the
    // corresponding vectors of A and B, plus 1 for overhead.  Each iteration
    // is independent, so the loop is a simple static-schedule parallel for.

    int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static)
    for (k = 0 ; k < Cnvec ; k++)
    {

        //----------------------------------------------------------------------
        // get the C(:,j) vector
        //----------------------------------------------------------------------

        int64_t j = (Ch == NULL) ? k : Ch [k] ;

        //----------------------------------------------------------------------
        // get the corresponding vector of A
        //----------------------------------------------------------------------

        int64_t kA ;
        if (C_to_A != NULL)
        {
            // A is hypersparse and the C_to_A mapping has been created
            ASSERT (A->is_hyper || A->is_slice) ;
            kA = C_to_A [k] ;
            ASSERT (kA >= -1 && kA < A->nvec) ;
            if (kA >= 0)
            {
                ASSERT (j == ((A->is_hyper) ? A->h [kA] : (A->hfirst + kA))) ;
            }
        }
        else if (Ch_is_Ah)
        {
            // A is hypersparse, but Ch is a shallow copy of A->h
            kA = k ;
            ASSERT (j == A->h [kA]) ;
        }
        else
        {
            // A is standard
            ASSERT (!A->is_hyper) ;
            ASSERT (!A->is_slice) ;
            ASSERT (A->h == NULL) ;
            kA = j ;
        }

        //----------------------------------------------------------------------
        // get the corresponding vector of B
        //----------------------------------------------------------------------

        int64_t kB ;
        if (C_to_B != NULL)
        {
            // B is hypersparse and the C_to_B mapping has been created
            ASSERT (B->is_hyper || B->is_slice) ;
            kB = C_to_B [k] ;
            ASSERT (kB >= -1 && kB < B->nvec) ;
            if (kB >= 0)
            {
                ASSERT (j == ((B->is_hyper) ? B->h [kB] : (B->hfirst + kB))) ;
            }
        }
        else if (Ch_is_Bh)
        {
            // B is hypersparse, but Ch is a shallow copy of B->h
            kB = k ;
            ASSERT (j == B->h [kB]) ;
        }
        else
        {
            // B is standard
            ASSERT (!B->is_hyper) ;
            ASSERT (!B->is_slice) ;
            ASSERT (B->h == NULL) ;
            kB = j ;
        }

        //----------------------------------------------------------------------
        // estimate the work for C(:,j)
        //----------------------------------------------------------------------

        // kA < 0 (or kB < 0) means vector j is not present in A (or B)
        ASSERT (kA >= -1 && kA < A->nvec) ;
        ASSERT (kB >= -1 && kB < B->nvec) ;
        int64_t aknz = (kA < 0) ? 0 : (Ap [kA+1] - Ap [kA]) ;
        int64_t bknz = (kB < 0) ? 0 : (Bp [kB+1] - Bp [kB]) ;
        Cwork [k] = aknz + bknz + 1 ;
    }

    //--------------------------------------------------------------------------
    // replace Cwork with its cumulative sum
    //--------------------------------------------------------------------------

    // after the cumsum, Cwork [k+1] - Cwork [k] is the work for vector k, and
    // Cwork [Cnvec] is the total work for the whole eWise operation
    GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork) ;
    double cwork = (double) Cwork [Cnvec] ;

    //--------------------------------------------------------------------------
    // determine # of threads and tasks for the eWise operation
    //--------------------------------------------------------------------------

    int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;

    ntasks0 = (M == NULL && nthreads == 1) ? 1 : (32 * nthreads) ;
    double target_task_size = cwork / (double) (ntasks0) ;
    target_task_size = GB_IMAX (target_task_size, chunk) ;
    ntasks1 = cwork / target_task_size ;
    ntasks1 = GB_IMAX (ntasks1, 1) ;

    //--------------------------------------------------------------------------
    // slice the work into coarse tasks
    //--------------------------------------------------------------------------

    // Coarse [t] is the first vector of coarse task t; tasks are balanced by
    // the cumulative work in Cwork.
    if (!GB_pslice (&Coarse, Cwork, Cnvec, ntasks1))
    {
        // out of memory
        GB_FREE_ALL ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // construct all tasks, both coarse and fine
    //--------------------------------------------------------------------------

    int ntasks = 0 ;

    for (int t = 0 ; t < ntasks1 ; t++)
    {

        //----------------------------------------------------------------------
        // coarse task computes C (:,k:klast)
        //----------------------------------------------------------------------

        int64_t k = Coarse [t] ;
        int64_t klast  = Coarse [t+1] - 1 ;

        if (k >= Cnvec)
        {

            //------------------------------------------------------------------
            // all tasks have been constructed
            //------------------------------------------------------------------

            break ;

        }
        else if (k < klast)
        {

            //------------------------------------------------------------------
            // coarse task has 2 or more vectors
            //------------------------------------------------------------------

            // This is a non-empty coarse-grain task that does two or more
            // entire vectors of C, vectors k:klast, inclusive.
            GB_REALLOC_TASK_LIST (TaskList, ntasks + 1, max_ntasks) ;
            TaskList [ntasks].kfirst = k ;
            TaskList [ntasks].klast  = klast ;
            ntasks++ ;

        }
        else
        {

            //------------------------------------------------------------------
            // coarse task has 0 or 1 vectors
            //------------------------------------------------------------------

            // As a coarse-grain task, this task is empty or does a single
            // vector, k.  Vector k must be removed from the work done by this
            // and any other coarse-grain task, and split into one or more
            // fine-grain tasks.

            for (int tt = t ; tt < ntasks1 ; tt++)
            {
                // remove k from the initial slice tt
                if (Coarse [tt] == k)
                {
                    // remove k from task tt
                    Coarse [tt] = k+1 ;
                }
                else
                {
                    // break, k not in task tt
                    break ;
                }
            }

            //------------------------------------------------------------------
            // get the vector of C
            //------------------------------------------------------------------

            int64_t j = (Ch == NULL) ? k : Ch [k] ;

            //------------------------------------------------------------------
            // get the corresponding vector of A
            //------------------------------------------------------------------

            int64_t kA ;
            if (C_to_A != NULL)
            {
                // A is hypersparse and the C_to_A mapping has been created
                kA = C_to_A [k] ;
            }
            else if (Ch_is_Ah)
            {
                // A is hypersparse, but Ch is a shallow copy of A->h
                kA = k ;
            }
            else
            {
                // A is standard
                kA = j ;
            }
            // pA_start:pA_end-1 is the range of entries of A(:,j); -1 if empty
            int64_t pA_start = (kA < 0) ? -1 : Ap [kA] ;
            int64_t pA_end   = (kA < 0) ? -1 : Ap [kA+1] ;
            bool a_empty = (pA_end == pA_start) ;

            //------------------------------------------------------------------
            // get the corresponding vector of B
            //------------------------------------------------------------------

            int64_t kB ;
            if (C_to_B != NULL)
            {
                // B is hypersparse and the C_to_B mapping has been created
                kB = C_to_B [k] ;
            }
            else if (Ch_is_Bh)
            {
                // B is hypersparse, but Ch is a shallow copy of B->h
                kB = k ;
            }
            else
            {
                // B is standard
                kB = j ;
            }
            int64_t pB_start = (kB < 0) ? -1 : Bp [kB] ;
            int64_t pB_end   = (kB < 0) ? -1 : Bp [kB+1] ;
            bool b_empty = (pB_end == pB_start) ;

            //------------------------------------------------------------------
            // get the corresponding vector of M, if present
            //------------------------------------------------------------------

            int64_t pM_start = -1 ;
            int64_t pM_end   = -1 ;
            if (M != NULL)
            {
                int64_t kM ;
                if (C_to_M != NULL)
                {
                    // M is hypersparse and the C_to_M mapping has been created
                    kM = C_to_M [k] ;
                }
                else if (Ch_is_Mh)
                {
                    // Ch is a deep or shallow copy of Mh
                    kM = k ;
                }
                else
                {
                    // M is standard
                    kM = j ;
                }
                pM_start = (kM < 0) ? -1 : Mp [kM] ;
                pM_end   = (kM < 0) ? -1 : Mp [kM+1] ;
            }
            bool m_empty = (pM_end == pM_start) ;

            //------------------------------------------------------------------
            // determine the # of fine-grain tasks to create for vector k
            //------------------------------------------------------------------

            double ckwork = Cwork [k+1] - Cwork [k] ;
            int nfine = ckwork / target_task_size ;
            nfine = GB_IMAX (nfine, 1) ;

            // make the TaskList bigger, if needed
            GB_REALLOC_TASK_LIST (TaskList, ntasks + nfine, max_ntasks) ;

            //------------------------------------------------------------------
            // create the fine-grain tasks
            //------------------------------------------------------------------

            if (nfine == 1)
            {

                //--------------------------------------------------------------
                // this is a single coarse task for all of vector k
                //--------------------------------------------------------------

                TaskList [ntasks].kfirst = k ;
                TaskList [ntasks].klast  = k ;
                ntasks++ ;

            }
            else
            {

                //--------------------------------------------------------------
                // slice vector k into nfine fine tasks
                //--------------------------------------------------------------

                // first fine task starts at the top of vector k
                ASSERT (ntasks < max_ntasks) ;
                TaskList [ntasks].kfirst = k ;
                TaskList [ntasks].klast  = -1 ;     // this is a fine task
                TaskList [ntasks].pM = (m_empty) ? -1 : pM_start ;
                TaskList [ntasks].pA = (a_empty) ? -1 : pA_start ;
                TaskList [ntasks].pB = (b_empty) ? -1 : pB_start ;
                TaskList [ntasks].len = 0 ;         // to be determined below
                ntasks++ ;
                int64_t ilast = 0, i = 0 ;

                for (int tfine = 1 ; tfine < nfine ; tfine++)
                {
                    // GB_slice_vector finds the index i and the offsets
                    // pM/pA/pB where this fine task starts, so that the
                    // remaining work is split evenly among tasks tfine:nfine-1
                    double target_work = ((nfine-tfine) * ckwork) / nfine ;
                    int64_t pM, pA, pB ;
                    GB_slice_vector (&i, &pM, &pA, &pB,
                        pM_start, pM_end, Mi,       // Mi NULL if M not present
                        pA_start, pA_end, Ai, 0,    // Ai always explicit list
                        pB_start, pB_end, Bi,       // Bi always explicit list
                        vlen, target_work) ;

                    // prior task ends at pM-1, pA-1, and pB-1
                    TaskList [ntasks-1].pM_end = pM ;
                    TaskList [ntasks-1].pA_end = pA ;
                    TaskList [ntasks-1].pB_end = pB ;

                    // prior task handles indices ilast:i-1
                    TaskList [ntasks-1].len = i - ilast ;

                    // this task starts at pM, pA, and pB
                    ASSERT (ntasks < max_ntasks) ;
                    TaskList [ntasks].kfirst = k ;
                    TaskList [ntasks].klast  = -1 ; // this is a fine task
                    TaskList [ntasks].pM = pM ;
                    TaskList [ntasks].pA = pA ;
                    TaskList [ntasks].pB = pB ;

                    // advance to the next task
                    ntasks++ ;
                    ilast = i ;
                }

                // Terminate the last fine task.
                ASSERT (ntasks <= max_ntasks) ;
                TaskList [ntasks-1].pM_end = (m_empty) ? -1 : pM_end ;
                TaskList [ntasks-1].pA_end = (a_empty) ? -1 : pA_end ;
                TaskList [ntasks-1].pB_end = (b_empty) ? -1 : pB_end ;
                TaskList [ntasks-1].len = vlen - i ;
            }
        }
    }

    ASSERT (ntasks <= max_ntasks) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    (*p_TaskList  ) = TaskList ;
    (*p_max_ntasks) = max_ntasks ;
    (*p_ntasks    ) = ntasks ;
    (*p_nthreads  ) = nthreads ;
    return (GrB_SUCCESS) ;
}
/* ======== matrix.h ======== */
/* Copyright 2016 Waizung Taam Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - 2016-08-17 - ======== tensor::Matrix ======== - namespace tensor - class Matrix - Declaration - type - constructors, etc - shape - iterators - accessors - modifiers - arithmetic - comparisons - io - helper functions - private data member - Implementation - Same order as declared - namespace internal before - transpose - arithmetic - comparisons - class MatrixException */ #ifndef TENSOR_MATRIX_H_ #define TENSOR_MATRIX_H_ #include "vector.h" #include <algorithm> #include <cmath> #include <exception> #include <fstream> #include <functional> #include <iomanip> #include <iostream> #include <limits> #include <string> #include <type_traits> #include <utility> #include <vector> #include <omp.h> #include <x86intrin.h> namespace tensor { class MatrixException; template <typename Tp> class Matrix { public: // ======== Types ======== typedef Tp value_type; typedef typename std::vector<Vector<Tp>>::size_type size_type; typedef typename std::make_signed<size_type>::type index_type; typedef typename std::vector<Vector<Tp>>::difference_type difference_type; typedef size_type dimension_type; typedef typename std::vector<Vector<Tp>>::iterator iterator; typedef typename std::vector<Vector<Tp>>::const_iterator const_iterator; typedef typename std::vector<Vector<Tp>>::reverse_iterator reverse_iterator; typedef typename std::vector<Vector<Tp>>::const_reverse_iterator const const_reverse_iterator; // ======== Constructors, etc ======== Matrix(); 
// ---- Matrix<Tp> declaration, continued: constructors, assignment,
// ---- shape, iterators, accessors, modifiers, arithmetic, comparisons,
// ---- io and private helpers.  (Implementations follow the class.)
// NOTE(review): `explicit` is deliberately commented out on the
// std::vector / initializer_list converting constructors, so implicit
// conversions from nested containers are intentional here.
  Matrix(const size_type& num_rows, const size_type& num_cols);
  Matrix(const size_type& num_rows, const size_type& num_cols,
         const Tp& val_init);
  template <typename OtherT>
  Matrix(const size_type& num_rows, const size_type& num_cols,
         const OtherT& val_cast);
  Matrix(const Matrix& mat_init);
  template <typename OtherT> Matrix(const Matrix<OtherT>& mat_cast);
  Matrix(Matrix&& mat_init);
  // NOTE(review): parameter name `mat_cat` looks like a typo for
  // `mat_cast` (declaration-only name; harmless to callers).
  template <typename OtherT> Matrix(Matrix<OtherT>&& mat_cat);
  /*explicit */Matrix(const std::vector<std::vector<Tp>>& stdvec_init);
  template <typename OtherT>
  /*explicit */Matrix(const std::vector<std::vector<OtherT>>& stdvec_cast);
  /*explicit */Matrix(const std::initializer_list<
      std::initializer_list<Tp>>& il_init);
  template <typename OtherT>
  /*explicit */Matrix(const std::initializer_list<
      std::initializer_list<OtherT>>& il_cast);
  // Column-vector constructor: one 1-element row per vector entry.
  explicit Matrix(const Vector<Tp>& vec_init);
  template <typename OtherT> explicit Matrix(const Vector<OtherT>& vec_cast);
  // Random-filled constructors; `dis` selects the distribution and the
  // Param values parameterize it (semantics defined by Vector/Random in
  // vector.h -- TODO confirm).
  template <typename ParamT1, typename ParamT2>
  Matrix(const size_type& num_rows, const size_type& num_cols,
         Random::Distribution dis, const ParamT1& param1,
         const ParamT2& param2);
  template <typename ParamT>
  Matrix(const size_type& num_rows, const size_type& num_cols,
         Random::Distribution dis, const ParamT& param);
  // Scalar assignment keeps the current shape and fills with the value.
  Matrix& operator=(const Tp& val_assign);
  template <typename OtherT> Matrix& operator=(const OtherT& val_cast);
  Matrix& operator=(const Matrix& mat_copy);
  template <typename OtherT> Matrix& operator=(const Matrix<OtherT>& mat_cast);
  Matrix& operator=(Matrix&& mat_move);
  template <typename OtherT> Matrix& operator=(Matrix<OtherT>&& mat_cast);
  Matrix& operator=(const std::vector<std::vector<Tp>>& stdvec_assign);
  template <typename OtherT>
  Matrix& operator=(const std::vector<std::vector<OtherT>>& stdvec_cast);
  Matrix& operator=(const std::initializer_list<
      std::initializer_list<Tp>>& il_assign);
  template <typename OtherT>
  Matrix& operator=(const std::initializer_list<
      std::initializer_list<OtherT>>& il_cast);
  Matrix& operator=(const Vector<Tp>& vec_assign);
  template <typename OtherT> Matrix& operator=(const Vector<OtherT>& vec_cast);
  ~Matrix();

  // ======== Shape ========
  // shape() returns {rows, cols}; {0, 0} for an empty matrix.
  Vector<size_type> shape() const;
  void clear();
  bool empty();

  // ======== Iterators ========
  iterator begin();
  iterator end();
  const_iterator begin() const;
  const_iterator end() const;
  const_iterator cbegin() const;
  const_iterator cend() const;
  reverse_iterator rbegin();
  reverse_iterator rend();
  const_reverse_iterator rbegin() const;
  const_reverse_iterator rend() const;
  const_reverse_iterator crbegin() const;
  const_reverse_iterator crend() const;

  // ======== Accessors ========
  // operator[] yields a row; operator() slices (negative indices are
  // normalized by to_positive_index_).
  Vector<Tp>& operator[](const index_type& index);
  const Vector<Tp>& operator[](const index_type& index) const;
  Matrix operator()(const index_type& idx_row) const;
  Matrix operator()(const const_iterator& cit_row) const;
  Matrix operator()(const index_type& idx_row_begin,
                    const index_type& idx_row_end) const;
  Matrix operator()(const const_iterator& cit_row_begin,
                    const const_iterator& cit_row_end) const;
  Matrix operator()(const index_type& idx_row_begin,
                    const index_type& idx_row_end,
                    const index_type& idx_col_begin,
                    const index_type& idx_col_end) const;

  // ======== Modifiers ========
  // All modifiers are value-returning (the receiver is not changed).
  // dim 0 = operate on rows, dim 1 = operate on columns.
  Matrix insert(const Vector<Tp>& vec_insert,
                const dimension_type& dim_insert,
                const index_type& idx_insert) const;
  Matrix insert(const Matrix& mat_insert, const dimension_type& dim_insert,
                const index_type& idx_insert) const;
  Matrix remove(const dimension_type& dim_remove,
                const index_type& idx_remove) const;
  Matrix remove(const dimension_type& dim_remove,
                const index_type& idx_begin, const index_type& idx_end) const;
  Matrix replace(const Vector<Tp>& vec_replace,
                 const dimension_type& dim_replace,
                 const index_type& idx_row_begin,
                 const index_type& idx_col_begin) const;
  Matrix replace(const Matrix& mat_replace, const index_type& idx_row_begin,
                 const index_type& idx_col_begin) const;
  Matrix transpose() const;
  Matrix T() const;
  Matrix reshape(const size_type& num_rows, const size_type& num_cols) const;
  Matrix shuffle() const;

  // ======== Arithmetic ========
  // +, -, / are element-wise; * is matrix multiplication (element-wise
  // product is provided by times()).
  template <typename AriT> friend Matrix<AriT> operator+(
      const Matrix<AriT>& mat_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator+(
      const Matrix<AriT>& mat_lhs, const AriT& val_rhs);
  template <typename AriT> friend Matrix<AriT> operator+(
      const AriT& val_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator-(
      const Matrix<AriT>& mat_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator-(
      const Matrix<AriT>& mat_lhs, const AriT& val_rhs);
  template <typename AriT> friend Matrix<AriT> operator-(
      const AriT& val_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator*(
      const Matrix<AriT>& mat_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator*(
      const Matrix<AriT>& mat_lhs, const AriT& val_rhs);
  template <typename AriT> friend Matrix<AriT> operator*(
      const Matrix<AriT>& mat_lhs, const Vector<AriT>& vec_rhs);
  template <typename AriT> friend Matrix<AriT> operator*(
      const Vector<AriT>& vec_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator*(
      const AriT& val_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator/(
      const Matrix<AriT>& mat_lhs, const Matrix<AriT>& mat_rhs);
  template <typename AriT> friend Matrix<AriT> operator/(
      const Matrix<AriT>& mat_lhs, const AriT& val_rhs);
  template <typename AriT> friend Matrix<AriT> operator/(
      const AriT& val_lhs, const Matrix<AriT>& mat_rhs);
  void operator+=(const Matrix& mat_rhs);
  void operator+=(const Tp& val_rhs);
  void operator-=(const Matrix& mat_rhs);
  void operator-=(const Tp& val_rhs);
  void operator*=(const Matrix& mat_rhs);
  void operator*=(const Tp& val_rhs);
  void operator/=(const Matrix& mat_rhs);
  void operator/=(const Tp& val_rhs);
  Matrix times(const Matrix& mat_rhs) const;
  Tp sum() const;
  Vector<Tp> sum(const dimension_type& dim_sum) const;

  // ======== Comparisons ========
  // Element-wise comparisons returning a mask matrix (not bool).
  template <typename CmpT> friend Matrix<CmpT> operator==(
      const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator==(
      const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator==(
      const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator!=(
      const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator!=(
      const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator!=(
      const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator<(
      const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator<(
      const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator<(
      const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator<=(
      const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator<=(
      const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator<=(
      const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator>(
      const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator>(
      const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator>(
      const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator>=(
      const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator>=(
      const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs);
  template <typename CmpT> friend Matrix<CmpT> operator>=(
      const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs);
  // ULP-tolerant whole-matrix equality (scalar result, unlike the
  // element-wise friends above).
  bool equal(const Matrix& mat_rhs, std::size_t ulp = 1);
  bool nequal(const Matrix& mat_rhs, std::size_t ulp = 1);
  Tp max() const;
  Vector<Tp> max(const dimension_type& dim_max) const;
  Tp min() const;
  Vector<Tp> min(const dimension_type& dim_min) const;

  // ======== IO ========
  template <typename MatT, typename CharT, typename Traits>
  friend std::basic_ostream<CharT, Traits>& operator<<(
      std::basic_ostream<CharT, Traits>& os, const Matrix<MatT>& mat);
  template <typename MatT, typename CharT, typename Traits>
  friend std::basic_istream<CharT, Traits>& operator>>(
      std::basic_istream<CharT, Traits>& is, Matrix<MatT>& mat);

 private:
  // ======== Helper Functions ========
  // Static overloads take the size/range explicitly; the non-static
  // overloads below use this matrix's own row count.
  static index_type to_positive_index_(const size_type& size,
                                       const index_type& index);
  static void exclusive_range_check_(const size_type& size,
                                     const index_type& index);
  static void exclusive_range_check_(const iterator& it_begin,
                                     const iterator& it_end,
                                     const iterator& it);
  static void exclusive_range_check_(const const_iterator& cit_begin,
                                     const const_iterator& cit_end,
                                     const const_iterator& cit);
  static void inclusive_range_check_(const size_type& size,
                                     const index_type& index);
  static void inclusive_range_check_(const iterator& it_begin,
                                     const iterator& it_end,
                                     const iterator& it);
  static void inclusive_range_check_(const const_iterator& cit_begin,
                                     const const_iterator& cit_end,
                                     const const_iterator& cit);
  static void shape_consistence_check_(const Vector<size_type>& shape_lhs,
                                       const Vector<size_type>& shape_rhs);
  static void index_order_check_(const size_type& size,
                                 const index_type& idx_begin,
                                 const index_type& idx_end);
  index_type to_positive_index_(const index_type& index) const;
  void exclusive_range_check_(const index_type& index) const;
  void exclusive_range_check_(const iterator& it);
  void exclusive_range_check_(const const_iterator& cit) const;
  void inclusive_range_check_(const index_type& index) const;
  void inclusive_range_check_(const iterator& it);
  void
inclusive_range_check_(const const_iterator& cit) const;
  void index_order_check_(const index_type& idx_begin,
                          const index_type& idx_end) const;
  void iterator_order_check_(const iterator& it_begin,
                             const iterator& it_end);
  void const_iterator_order_check_(const const_iterator& cit_begin,
                                   const const_iterator& cit_end) const;

  // ======== Private Data Member ========
  // Row-major storage: one Vector<Tp> per row.
  std::vector<Vector<Tp>> mat_;
};

// ======== Constructors, etc ========
template <typename Tp>
Matrix<Tp>::Matrix() {}

template <typename Tp>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols)
  : mat_(std::vector<Vector<Tp>>(num_rows, Vector<Tp>(num_cols))) {}

template <typename Tp>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   const Tp& val_init)
  : mat_(std::vector<Vector<Tp>>(num_rows,
                                 Vector<Tp>(num_cols, val_init))) {}

template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   const OtherT& val_cast)
  : mat_(std::vector<Vector<Tp>>(num_rows,
                                 Vector<Tp>(num_cols, val_cast))) {}

template <typename Tp>
Matrix<Tp>::Matrix(const Matrix& mat_init) : mat_(mat_init.mat_) {}

// Element-type-converting copy: each row is rebuilt as Vector<Tp>.
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const Matrix<OtherT>& mat_cast) {
  mat_.resize(mat_cast.shape()[0]);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(mat_cast[idx_row]);
  }
}

template <typename Tp>
Matrix<Tp>::Matrix(Matrix&& mat_init) : mat_(std::move(mat_init.mat_)) {}

// Converting "move": moves the source into a local cache, then copies
// each row through a Vector<Tp> conversion (element types differ, so a
// true buffer steal is not possible).
template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(Matrix<OtherT>&& mat_cast) {
  mat_.resize(mat_cast.shape()[0]);
  Matrix<OtherT> mat_cache = std::move(mat_cast);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(mat_cache[idx_row]);
  }
}

template <typename Tp>
Matrix<Tp>::Matrix(const std::vector<std::vector<Tp>>& stdvec_init) {
  mat_.resize(stdvec_init.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(stdvec_init[idx_row]);
  }
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const std::vector<std::vector<OtherT>>& stdvec_cast) {
  mat_.resize(stdvec_cast.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(stdvec_cast[idx_row]);
  }
}

template <typename Tp>
Matrix<Tp>::Matrix(const std::initializer_list<
    std::initializer_list<Tp>>& il_init) {
  mat_.resize(il_init.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(*(il_init.begin() + idx_row));
  }
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const std::initializer_list<
    std::initializer_list<OtherT>>& il_cast) {
  mat_.resize(il_cast.size());
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(*(il_cast.begin() + idx_row));
  }
}

// Column-vector constructor: n x 1 matrix, one 1-element row per entry.
template <typename Tp>
Matrix<Tp>::Matrix(const Vector<Tp>& vec_init) {
  mat_.resize(vec_init.shape()[0]);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(1, vec_init[idx_row]);
  }
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>::Matrix(const Vector<OtherT>& vec_cast) {
  mat_.resize(vec_cast.shape()[0]);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(1, vec_cast[idx_row]);
  }
}

// Random-filled constructors: each row delegates to the corresponding
// Vector random constructor (distribution semantics live in vector.h).
template <typename Tp> template <typename ParamT1, typename ParamT2>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   Random::Distribution dis, const ParamT1& param1,
                   const ParamT2& param2) {
  mat_.resize(num_rows);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(num_cols, dis, param1, param2);
  }
}

template <typename Tp> template <typename ParamT>
Matrix<Tp>::Matrix(const size_type& num_rows, const size_type& num_cols,
                   Random::Distribution dis, const ParamT& param) {
  mat_.resize(num_rows);
  for (size_type idx_row = 0; idx_row < mat_.size(); ++idx_row) {
    mat_[idx_row] = Vector<Tp>(num_cols, dis, param);
  }
}

// Scalar assignment: keeps the current shape, fills every element.
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const Tp& val_assign) {
  mat_ = Matrix<Tp>(shape()[0], shape()[1], val_assign).mat_;
  return *this;
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const OtherT& val_cast) {
  mat_ = Matrix<Tp>(shape()[0], shape()[1], val_cast).mat_;
  return *this;
}

template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const Matrix& mat_copy) {
  mat_ = mat_copy.mat_;
  return *this;
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const Matrix<OtherT>& mat_cast) {
  mat_ = Matrix<Tp>(mat_cast).mat_;
  return *this;
}

// NOTE(review): this move-assignment constructs Matrix<Tp>(mat_move)
// from an lvalue, so it invokes the COPY constructor -- nothing is
// actually moved.  Candidate for mat_ = std::move(mat_move.mat_).
template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(Matrix&& mat_move) {
  mat_ = Matrix<Tp>(mat_move).mat_;
  return *this;
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(Matrix<OtherT>&& mat_cast) {
  mat_ = Matrix<Tp>(mat_cast).mat_;
  return *this;
}

template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const std::vector<
    std::vector<Tp>>& stdvec_assign) {
  mat_ = Matrix<Tp>(stdvec_assign).mat_;
  return *this;
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const std::vector<
    std::vector<OtherT>>& stdvec_cast) {
  mat_ = Matrix<Tp>(stdvec_cast).mat_;
  return *this;
}

template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const std::initializer_list<
    std::initializer_list<Tp>>& il_assign) {
  mat_ = Matrix<Tp>(il_assign).mat_;
  return *this;
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const std::initializer_list<
    std::initializer_list<OtherT>>& il_cast) {
  mat_ = Matrix<Tp>(il_cast).mat_;
  return *this;
}

template <typename Tp>
Matrix<Tp>& Matrix<Tp>::operator=(const Vector<Tp>& vec_assign) {
  mat_ = Matrix(vec_assign).mat_;
  return *this;
}

template <typename Tp> template <typename OtherT>
Matrix<Tp>& Matrix<Tp>::operator=(const Vector<OtherT>& vec_cast) {
  mat_ =
Matrix(vec_cast).mat_;
  return *this;
}

template <typename Tp>
Matrix<Tp>::~Matrix() {}

// ======== Shape ========
// Returns {rows, cols}; {0, 0} when there are no rows.  Column count is
// read from row 0 (rows are assumed uniform).
template <typename Tp>
Vector<typename Matrix<Tp>::size_type> Matrix<Tp>::shape() const {
  if (mat_.size() == 0) {
    return Vector<size_type>({0, 0});
  }
  return Vector<size_type>({mat_.size(), mat_[0].shape()[0]});
}

template <typename Tp>
void Matrix<Tp>::clear() {
  mat_.clear();
}

template <typename Tp>
bool Matrix<Tp>::empty() {
  return mat_.size() == 0;
}

// ======== Iterators ========
// All iterators are over rows; const overloads delegate to the c*
// variants of the underlying std::vector.
template <typename Tp>
typename Matrix<Tp>::iterator Matrix<Tp>::begin() {
  return mat_.begin();
}
template <typename Tp>
typename Matrix<Tp>::iterator Matrix<Tp>::end() {
  return mat_.end();
}
template <typename Tp>
typename Matrix<Tp>::const_iterator Matrix<Tp>::begin() const {
  return mat_.cbegin();
}
template <typename Tp>
typename Matrix<Tp>::const_iterator Matrix<Tp>::end() const {
  return mat_.cend();
}
template <typename Tp>
typename Matrix<Tp>::const_iterator Matrix<Tp>::cbegin() const {
  return mat_.cbegin();
}
template <typename Tp>
typename Matrix<Tp>::const_iterator Matrix<Tp>::cend() const {
  return mat_.cend();
}
template <typename Tp>
typename Matrix<Tp>::reverse_iterator Matrix<Tp>::rbegin() {
  return mat_.rbegin();
}
template <typename Tp>
typename Matrix<Tp>::reverse_iterator Matrix<Tp>::rend() {
  return mat_.rend();
}
template <typename Tp>
typename Matrix<Tp>::const_reverse_iterator Matrix<Tp>::rbegin() const {
  return mat_.crbegin();
}
template <typename Tp>
typename Matrix<Tp>::const_reverse_iterator Matrix<Tp>::rend() const {
  return mat_.crend();
}
template <typename Tp>
typename Matrix<Tp>::const_reverse_iterator Matrix<Tp>::crbegin() const {
  return mat_.crbegin();
}
template <typename Tp>
typename Matrix<Tp>::const_reverse_iterator Matrix<Tp>::crend() const {
  return mat_.crend();
}

// ======== Accessors ========
// Row access; negative indices are normalized, range is checked first.
template <typename Tp>
Vector<Tp>& Matrix<Tp>::operator[](const index_type& index) {
  exclusive_range_check_(index);
  return mat_.at(to_positive_index_(index));
}

template <typename Tp>
const Vector<Tp>& Matrix<Tp>::operator[](const index_type& index) const {
  exclusive_range_check_(index);
  return mat_.at(to_positive_index_(index));
}

// Single-row slice, returned as a 1 x cols matrix.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const index_type& idx_row) const {
  exclusive_range_check_(idx_row);
  Matrix row_mat(1, shape()[1]);
  row_mat[0] = mat_[to_positive_index_(idx_row)];
  return row_mat;
}

template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const const_iterator& cit_row) const {
  exclusive_range_check_(cit_row);
  Matrix row_mat(1, shape()[1]);
  row_mat[0] = *cit_row;
  return row_mat;
}

// Half-open row range [idx_row_begin, idx_row_end).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const index_type& idx_row_begin,
    const index_type& idx_row_end) const {
  exclusive_range_check_(idx_row_begin);
  inclusive_range_check_(idx_row_end);
  index_order_check_(idx_row_begin, idx_row_end);
  size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
  size_type idx_row_end_p = to_positive_index_(idx_row_end);
  Matrix mat_partial(idx_row_end_p - idx_row_begin_p, shape()[1]);
  for (size_type idx_row = 0; idx_row < mat_partial.shape()[0]; ++idx_row) {
    mat_partial[idx_row] = mat_[idx_row_begin_p + idx_row];
  }
  return mat_partial;
}

template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const const_iterator& cit_row_begin,
    const const_iterator& cit_row_end) const {
  exclusive_range_check_(cit_row_begin);
  inclusive_range_check_(cit_row_end);
  const_iterator_order_check_(cit_row_begin, cit_row_end);
  Matrix mat_partial(static_cast<size_type>(cit_row_end - cit_row_begin),
                     shape()[1]);
  for (size_type idx_row = 0; idx_row < mat_partial.shape()[0]; ++idx_row) {
    mat_partial[idx_row] = *(cit_row_begin + idx_row);
  }
  return mat_partial;
}

// 2-D sub-block: rows [idx_row_begin, idx_row_end) by columns
// [idx_col_begin, idx_col_end).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::operator()(const index_type& idx_row_begin,
    const index_type& idx_row_end, const index_type& idx_col_begin,
    const index_type& idx_col_end) const {
  exclusive_range_check_(idx_row_begin);
  inclusive_range_check_(idx_row_end);
  exclusive_range_check_(idx_col_begin);
  inclusive_range_check_(idx_col_end);
  index_order_check_(idx_row_begin, idx_row_end);
  index_order_check_(idx_col_begin, idx_col_end);
  size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
  size_type idx_row_end_p = to_positive_index_(idx_row_end);
  size_type idx_col_begin_p = to_positive_index_(idx_col_begin);
  size_type idx_col_end_p = to_positive_index_(idx_col_end);
  Matrix mat_partial(idx_row_end_p - idx_row_begin_p,
                     idx_col_end_p - idx_col_begin_p);
  for (size_type idx_row = 0; idx_row < mat_partial.shape()[0]; ++idx_row) {
    mat_partial[idx_row] = mat_[idx_row_begin_p + idx_row](
        idx_col_begin_p, idx_col_end_p);
  }
  return mat_partial;
}

// ======== Modifiers ========
// Insert a vector as a new row (dim 0) or a new column (dim 1).
// Value-returning: the receiver is left untouched.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::insert(const Vector<Tp>& vec_insert,
    const dimension_type& dim_insert, const index_type& idx_insert) const {
  if (dim_insert == 0) {
    if (vec_insert.shape()[0] != shape()[1]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
        std::to_string(vec_insert.shape()[0]) + " != number of columns " +
        std::to_string(shape()[1]) + ".";
      throw MatrixException(err_msg);
    }
    inclusive_range_check_(idx_insert);
    Matrix mat_inserted = *this;
    mat_inserted.mat_.insert(
      mat_inserted.mat_.begin() + to_positive_index_(idx_insert),
      vec_insert);
    return mat_inserted;
  } else if (dim_insert == 1) {
    if (vec_insert.shape()[0] != shape()[0]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
        std::to_string(vec_insert.shape()[0]) + " != number of rows " +
        std::to_string(shape()[0]) + ".";
      throw MatrixException(err_msg);
    }
    // Column insert: delegate per-row to Vector::insert.
    Matrix mat_inserted = *this;
    for (size_type idx_row = 0; idx_row < mat_inserted.shape()[0];
         ++idx_row) {
      mat_inserted.mat_[idx_row] = mat_inserted.mat_[idx_row].insert(
        vec_insert[idx_row], idx_insert);
    }
    return mat_inserted;
  } else {
    std::string err_msg = "Invalid Dimension: " +
      std::to_string(dim_insert) + " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}
// Insert a whole matrix as a block of rows (dim 0) or columns (dim 1).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::insert(const Matrix& mat_insert,
    const dimension_type& dim_insert, const index_type& idx_insert) const {
  if (dim_insert == 0) {
    if (mat_insert.shape()[1] != shape()[1]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
        std::to_string(mat_insert.shape()[1]) + " != number of columns " +
        std::to_string(shape()[1]) + ".";
      throw MatrixException(err_msg);
    }
    inclusive_range_check_(idx_insert);
    Matrix mat_inserted = *this;
    mat_inserted.mat_.insert(
      mat_inserted.mat_.begin() + to_positive_index_(idx_insert),
      mat_insert.mat_.begin(), mat_insert.mat_.end());
    return mat_inserted;
  } else if (dim_insert == 1) {
    if (mat_insert.shape()[0] != shape()[0]) {
      std::string err_msg = "Inconsistent insert shape: insert size " +
        std::to_string(mat_insert.shape()[0]) + " != number of rows " +
        std::to_string(shape()[0]) + ".";
      throw MatrixException(err_msg);
    }
    Matrix mat_inserted = *this;
    for (size_type idx_row = 0; idx_row < mat_inserted.shape()[0];
         ++idx_row) {
      mat_inserted.mat_[idx_row] = mat_inserted.mat_[idx_row].insert(
        mat_insert.mat_[idx_row], idx_insert);
    }
    return mat_inserted;
  } else {
    std::string err_msg = "Invalid Dimension: " +
      std::to_string(dim_insert) + " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}

// Remove one row (dim 0) or one column (dim 1).
// NOTE(review): in the dim==1 branch idx_remove is not range-checked
// here -- presumably Vector::remove validates it; verify in vector.h.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::remove(const dimension_type& dim_remove,
    const index_type& idx_remove) const {
  if (dim_remove == 0) {
    exclusive_range_check_(idx_remove);
    Matrix mat_removed = *this;
    mat_removed.mat_.erase(mat_removed.mat_.begin() +
                           to_positive_index_(idx_remove));
    return mat_removed;
  } else if (dim_remove == 1) {
    Matrix mat_removed = *this;
    for (size_type idx_row = 0; idx_row < mat_removed.shape()[0];
         ++idx_row) {
      mat_removed.mat_[idx_row] =
        mat_removed.mat_[idx_row].remove(idx_remove);
    }
    return mat_removed;
  } else {
    std::string err_msg = "Invalid Dimension: " +
      std::to_string(dim_remove) + " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}

// Remove the half-open range [idx_begin, idx_end) of rows or columns.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::remove(const dimension_type& dim_remove,
    const index_type& idx_begin, const index_type& idx_end) const {
  if (dim_remove == 0) {
    exclusive_range_check_(idx_begin);
    inclusive_range_check_(idx_end);
    index_order_check_(idx_begin, idx_end);
    Matrix mat_removed = *this;
    mat_removed.mat_.erase(
      mat_removed.mat_.begin() + to_positive_index_(idx_begin),
      mat_removed.mat_.begin() + to_positive_index_(idx_end));
    return mat_removed;
  } else if (dim_remove == 1) {
    Matrix mat_removed = *this;
    for (size_type idx_row = 0; idx_row < mat_removed.shape()[0];
         ++idx_row) {
      mat_removed.mat_[idx_row] = mat_removed.mat_[idx_row].remove(
        idx_begin, idx_end);
    }
    return mat_removed;
  } else {
    std::string err_msg = "Invalid Dimension: " +
      std::to_string(dim_remove) + " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}

// Overwrite part of one row (dim 0) or part of one column (dim 1)
// starting at the given position.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::replace(const Vector<Tp>& vec_replace,
    const dimension_type& dim_replace, const index_type& idx_row_begin,
    const index_type& idx_col_begin) const {
  if (dim_replace == 0) {
    exclusive_range_check_(idx_row_begin);
    Matrix mat_replaced = *this;
    mat_replaced[idx_row_begin] = mat_replaced[idx_row_begin].replace(
      vec_replace, idx_col_begin);
    return mat_replaced;
  } else if (dim_replace == 1) {
    exclusive_range_check_(idx_row_begin);
    size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
    Matrix mat_replaced = *this;
    // Clipped at the bottom edge: stops at the last existing row.
    for (size_type idx_row = 0;
         idx_row < vec_replace.shape()[0] &&
         idx_row_begin_p + idx_row < mat_replaced.shape()[0]; ++idx_row) {
      mat_replaced.mat_[idx_row_begin_p + idx_row] =
        mat_replaced.mat_[idx_row_begin_p + idx_row].replace(
          vec_replace[idx_row], idx_col_begin);
    }
    return mat_replaced;
  } else {
    std::string err_msg = "Invalid Dimension: " +
      std::to_string(dim_replace) + " != 0 or 1.";
    throw MatrixException(err_msg);
  }
}

// Overwrite a 2-D block with mat_replace, anchored at (row, col) and
// clipped at the bottom edge.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::replace(const Matrix& mat_replace,
    const index_type& idx_row_begin, const index_type& idx_col_begin) const {
  exclusive_range_check_(idx_row_begin);
  size_type idx_row_begin_p = to_positive_index_(idx_row_begin);
  Matrix mat_replaced = *this;
  for (size_type idx_row = 0;
       idx_row < mat_replace.shape()[0] &&
       idx_row_begin_p + idx_row < mat_replaced.shape()[0]; ++idx_row) {
    mat_replaced.mat_[idx_row_begin_p + idx_row] =
      mat_replaced.mat_[idx_row_begin_p + idx_row].replace(
        mat_replace[idx_row], idx_col_begin);
  }
  return mat_replaced;
}

// namespace internal
namespace internal {

// Out-of-place transpose of mat_from into the pre-sized mat_to.
// Small matrices (rows + cols < 2 * size_c_to_c_omp) run serially; the
// rest use a collapsed OpenMP loop nest.
template <typename Tp>
void transpose(const Matrix<Tp>& mat_from, Matrix<Tp>& mat_to) {
  typename Matrix<Tp>::size_type num_rows = mat_to.shape()[0],
                                 num_cols = mat_to.shape()[1],
                                 size_c_to_c_omp = 8;
  const Vector<Tp>* ptr_row_from = &mat_from[0];
  Vector<Tp>* ptr_row_to = &mat_to[0];
  if (num_rows + num_cols < 2 * size_c_to_c_omp) {
    for (typename Matrix<Tp>::size_type idx_col = 0; idx_col < num_cols;
         ++idx_col) {
      const Tp* ptr_col_from = &ptr_row_from[idx_col][0];
      for (typename Matrix<Tp>::size_type idx_row = 0; idx_row < num_rows;
           ++idx_row) {
        Tp* ptr_col_to = &ptr_row_to[idx_row][0];
        ptr_col_to[idx_col] = ptr_col_from[idx_row];
      }
    }
  } else {
#pragma omp parallel for shared(ptr_row_from, ptr_row_to) \
    schedule(auto) collapse(2)
    for (typename Matrix<Tp>::size_type idx_col = 0; idx_col < num_cols;
         ++idx_col) {
      for (typename Matrix<Tp>::size_type idx_row = 0; idx_row < num_rows;
           ++idx_row) {
        const Tp* ptr_col_from = &ptr_row_from[idx_col][0];
        Tp* ptr_col_to = &ptr_row_to[idx_row][0];
        ptr_col_to[idx_col] = ptr_col_from[idx_row];
      }
    }
  }
}

}  // namespace internal

template <typename Tp>
Matrix<Tp> Matrix<Tp>::transpose() const {
  Matrix mat_t(shape()[1], shape()[0]);
  internal::transpose(*this, mat_t);
  return mat_t;
}

template <typename Tp>
Matrix<Tp> Matrix<Tp>::T() const {
  return transpose();
}

// Reshape by flattening row-major into a cache vector, then refilling.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::reshape(const size_type& num_rows,
                               const size_type& num_cols) const {
  Vector<Tp> vec_cache(num_rows * num_cols);
  bool is_end_of_vec_cache = false;
  typename Vector<Tp>::size_type idx_vec_cache = 0;
  for (size_type
idx_row = 0; idx_row < shape()[0]; ++idx_row) {
    for (size_type idx_col = 0; idx_col < shape()[1]; ++idx_col) {
      vec_cache[idx_vec_cache] = mat_[idx_row][idx_col];
      ++idx_vec_cache;
      // Stop once the target size is reached (new shape may be smaller).
      if (idx_vec_cache >= vec_cache.shape()[0]) {
        is_end_of_vec_cache = true;
        break;
      }
    }
    if (is_end_of_vec_cache) {
      break;
    }
  }
  Matrix<Tp> mat_reshaped(num_rows, num_cols);
  idx_vec_cache = 0;
  for (index_type idx_row = 0; idx_row < mat_reshaped.shape()[0];
       ++idx_row) {
    for (index_type idx_col = 0; idx_col < mat_reshaped.shape()[1];
         ++idx_col) {
      mat_reshaped.mat_[idx_row][idx_col] = vec_cache[idx_vec_cache];
      ++idx_vec_cache;
    }
  }
  return mat_reshaped;
}

// Row-shuffle using a freshly seeded engine (non-deterministic).
template <typename Tp>
Matrix<Tp> Matrix<Tp>::shuffle() const {
  std::random_device rd;
  std::default_random_engine gen(rd());
  Matrix mat_shuffled = *this;
  std::shuffle(mat_shuffled.mat_.begin(), mat_shuffled.mat_.end(), gen);
  return mat_shuffled;
}

// ======== Arithmetic ========
// Element-wise kernels: each macro expands to a row-parallel helper in
// namespace internal (matrix op matrix, matrix op scalar, scalar op
// matrix).  No comments inside the macro bodies -- line splicing would
// break them.
namespace internal {

#define OMP_FOR_3_MAT \
_Pragma("omp parallel for shared(mat_lhs, mat_rhs, mat_ans) schedule(auto)")
#define OMP_FOR_2_MAT_L_ANS \
_Pragma("omp parallel for shared(mat_lhs, val_rhs, mat_ans) schedule(auto)")
#define OMP_FOR_2_MAT_R_ANS \
_Pragma("omp parallel for shared(val_lhs, mat_rhs, mat_ans) schedule(auto)")

#define MAT_OP_MAT(OPERATION, OPERATOR) \
template <typename OpT> \
void OPERATION(const Matrix<OpT>& mat_lhs, const Matrix<OpT>& mat_rhs, \
               Matrix<OpT>& mat_ans) { \
  OMP_FOR_3_MAT \
  for (typename Matrix<OpT>::size_type idx_row = 0; \
       idx_row < mat_ans.shape()[0]; ++idx_row) { \
    mat_ans[idx_row] = (mat_lhs[idx_row] OPERATOR mat_rhs[idx_row]); \
  } \
}
MAT_OP_MAT(add, +)
MAT_OP_MAT(sub, -)
MAT_OP_MAT(mul, *)
MAT_OP_MAT(div, /)

#define MAT_OP_SCA(OPERATION, OPERATOR) \
template <typename OpT> \
void OPERATION(const Matrix<OpT>& mat_lhs, const OpT& val_rhs, \
               Matrix<OpT>& mat_ans) { \
  OMP_FOR_2_MAT_L_ANS \
  for (typename Matrix<OpT>::size_type idx_row = 0; \
       idx_row < mat_ans.shape()[0]; ++idx_row) { \
    mat_ans[idx_row] = (mat_lhs[idx_row] OPERATOR val_rhs); \
  } \
}
MAT_OP_SCA(add, +)
MAT_OP_SCA(sub, -)
MAT_OP_SCA(mul, *)
MAT_OP_SCA(div, /)

#define SCA_OP_MAT(OPERATION, OPERATOR) \
template <typename OpT> \
void OPERATION(const OpT& val_lhs, const Matrix<OpT>& mat_rhs, \
               Matrix<OpT>& mat_ans) { \
  OMP_FOR_2_MAT_R_ANS \
  for (typename Matrix<OpT>::size_type idx_row = 0; \
       idx_row < mat_ans.shape()[0]; ++idx_row) { \
    mat_ans[idx_row] = (val_lhs OPERATOR mat_rhs[idx_row]); \
  } \
}
SCA_OP_MAT(add, +)
SCA_OP_MAT(sub, -)
SCA_OP_MAT(mul, *)
SCA_OP_MAT(div, /)

}  // namespace internal

// Element-wise addition (shapes must agree for the matrix/matrix form).
template <typename AriT>
Matrix<AriT> operator+(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  Matrix<AriT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  Matrix<AriT> mat_sum(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::add(mat_lhs, mat_rhs, mat_sum);
  return mat_sum;
}
template <typename AriT>
Matrix<AriT> operator+(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_sum(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::add(mat_lhs, val_rhs, mat_sum);
  return mat_sum;
}
template <typename AriT>
Matrix<AriT> operator+(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_sum(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::add(val_lhs,mat_rhs, mat_sum);
  return mat_sum;
}

// Element-wise subtraction.
template <typename AriT>
Matrix<AriT> operator-(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  Matrix<AriT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  Matrix<AriT> mat_diff(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::sub(mat_lhs, mat_rhs, mat_diff);
  return mat_diff;
}
template <typename AriT>
Matrix<AriT> operator-(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_diff(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::sub(mat_lhs, val_rhs, mat_diff);
  return mat_diff;
}
template <typename AriT>
Matrix<AriT> operator-(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_diff(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::sub(val_lhs, mat_rhs, mat_diff);
  return mat_diff;
}

// namespace internal
namespace internal {

// Matrix multiplication kernel: transposes rhs so each output element
// is a dot product of two contiguous rows, parallelized over the
// collapsed output index space.
template <typename MatT>
void mat_mul(const Matrix<MatT>& mat_lhs, const Matrix<MatT>& mat_rhs,
             Matrix<MatT>& mat_ans) {
  typename Matrix<MatT>::size_type num_rows = mat_ans.shape()[0],
                                   num_cols = mat_ans.shape()[1];
  Matrix<MatT> mat_rhs_t = mat_rhs.transpose();
  const Vector<MatT>* ptr_row_lhs = &mat_lhs[0];
  const Vector<MatT>* ptr_row_rhs = &mat_rhs_t[0];
  Vector<MatT>* ptr_row_ans = &mat_ans[0];
#pragma omp parallel for shared(ptr_row_lhs, ptr_row_rhs, ptr_row_ans) \
    schedule(auto) collapse(2)
  for (typename Matrix<MatT>::size_type idx_row = 0; idx_row < num_rows;
       ++idx_row) {
    for (typename Matrix<MatT>::size_type idx_col = 0; idx_col < num_cols;
         ++idx_col) {
      *(&ptr_row_ans[idx_row][0] + idx_col) +=
        (ptr_row_lhs[idx_row] * ptr_row_rhs[idx_col]).sum();
    }
  }
}

}  // namespace internal

// True matrix multiplication; inner dimensions must agree.
template <typename AriT>
Matrix<AriT> operator*(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  if (mat_lhs.shape()[1] != mat_rhs.shape()[0]) {
    std::string err_msg = "Inconsistent shape for matrix multiplication: " +
      std::to_string(mat_lhs.shape()[1]) + " != " +
      std::to_string(mat_rhs.shape()[0]) + ".";
    throw MatrixException(err_msg);
  }
  Matrix<AriT> mat_prod(mat_lhs.shape()[0], mat_rhs.shape()[1]);
  internal::mat_mul(mat_lhs, mat_rhs, mat_prod);
  return mat_prod;
}
template <typename AriT>
Matrix<AriT> operator*(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_prod(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::mul(mat_lhs, val_rhs, mat_prod);
  return mat_prod;
}
template <typename AriT>
Matrix<AriT> operator*(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_prod(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::mul(val_lhs, mat_rhs, mat_prod);
  return mat_prod;
}
// Matrix-vector products: promote the vector to a column matrix.
template <typename AriT>
Matrix<AriT> operator*(const Matrix<AriT>& mat_lhs,
                       const Vector<AriT>& vec_rhs) {
  Matrix<AriT> mat_rhs(vec_rhs);
  return operator*(mat_lhs, mat_rhs);
}
template <typename AriT>
Matrix<AriT> operator*(const Vector<AriT>& vec_lhs,
                       const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_lhs(vec_lhs);
  return operator*(mat_lhs, mat_rhs);
}

// Element-wise division.
template <typename AriT>
Matrix<AriT> operator/(const Matrix<AriT>& mat_lhs,
                       const Matrix<AriT>& mat_rhs) {
  Matrix<AriT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape());
  Matrix<AriT> mat_quot(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::div(mat_lhs, mat_rhs, mat_quot);
  return mat_quot;
}
template <typename AriT>
Matrix<AriT> operator/(const Matrix<AriT>& mat_lhs, const AriT& val_rhs) {
  Matrix<AriT> mat_quot(mat_lhs.shape()[0], mat_lhs.shape()[1]);
  internal::div(mat_lhs, val_rhs, mat_quot);
  return mat_quot;
}
template <typename AriT>
Matrix<AriT> operator/(const AriT& val_lhs, const Matrix<AriT>& mat_rhs) {
  Matrix<AriT> mat_quot(mat_rhs.shape()[0], mat_rhs.shape()[1]);
  internal::div(val_lhs, mat_rhs, mat_quot);
  return mat_quot;
}

// Compound assignments all delegate to the binary operators above.
template <typename Tp>
void Matrix<Tp>::operator+=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) + mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator+=(const Tp& val_rhs) {
  (*this) = (*this) + val_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator-=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) - mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator-=(const Tp& val_rhs) {
  (*this) = (*this) - val_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator*=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) * mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator*=(const Tp& val_rhs) {
  (*this) = (*this) * val_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator/=(const Matrix<Tp>& mat_rhs) {
  (*this) = (*this) / mat_rhs;
}
template <typename Tp>
void Matrix<Tp>::operator/=(const Tp& val_rhs) {
  (*this) = (*this) / val_rhs;
}

// Element-wise (Hadamard) product; continues past this window.
template <typename Tp>
Matrix<Tp> Matrix<Tp>::times(const Matrix& mat_rhs) const {
  shape_consistence_check_(shape(), mat_rhs.shape());
  Matrix mat_prod(shape()[0], shape()[1]);
  internal::mul(*this,
mat_rhs, mat_prod); return mat_prod; } template <typename Tp> Tp Matrix<Tp>::sum() const { Tp sum_val = Tp(); #pragma omp parallel for schedule(auto) reduction(+ : sum_val) for (size_type idx_row = 0; idx_row < shape()[0]; ++idx_row) { sum_val = sum_val + mat_[idx_row].sum(); } return sum_val; } // namespace internal namespace internal { template <typename SumT> void sum_of_dim_one(const Matrix<SumT>& mat, Vector<SumT>& vec_sum) { for (typename Matrix<SumT>::size_type idx_row = 0; idx_row < mat.shape()[0]; ++idx_row) { vec_sum[idx_row] = mat[idx_row].sum(); } } } // namespace internal template <typename Tp> Vector<Tp> Matrix<Tp>::sum(const dimension_type& dim_sum) const { if (dim_sum == 0) { Vector<Tp> vec_sum(shape()[1]); internal::sum_of_dim_one(transpose(), vec_sum); return vec_sum; } else if (dim_sum == 1) { Vector<Tp> vec_sum(shape()[0]); internal::sum_of_dim_one(*this, vec_sum); return vec_sum; } else { std::string err_msg = "Invalid Dimension: " + std::to_string(dim_sum) + " != 0 or 1."; throw MatrixException(err_msg); } } // ======== Comparisons ======== namespace internal { MAT_OP_MAT(eq, ==) MAT_OP_MAT(ne, !=) MAT_OP_MAT(lt, <) MAT_OP_MAT(le, <=) MAT_OP_MAT(gt, >) MAT_OP_MAT(ge, >=) MAT_OP_SCA(eq, ==) MAT_OP_SCA(ne, !=) MAT_OP_SCA(lt, <) MAT_OP_SCA(le, <=) MAT_OP_SCA(gt, >) MAT_OP_SCA(ge, >=) SCA_OP_MAT(eq, ==) SCA_OP_MAT(ne, !=) SCA_OP_MAT(lt, <) SCA_OP_MAT(le, <=) SCA_OP_MAT(gt, >) SCA_OP_MAT(ge, >=) #undef MAT_OP_MAT #undef MAT_OP_SCA #undef SCA_OP_MAT } // namespace internal template <typename CmpT> Matrix<CmpT> operator==(const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape()); Matrix<CmpT> mat_eq(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::eq(mat_lhs, mat_rhs, mat_eq); return mat_eq; } template <typename CmpT> Matrix<CmpT> operator==(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) { Matrix<CmpT> mat_eq(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::eq(mat_lhs, 
val_rhs, mat_eq); return mat_eq; } template <typename CmpT> Matrix<CmpT> operator==(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT> mat_eq(mat_rhs.shape()[0], mat_rhs.shape()[1]); internal::eq(val_lhs, mat_rhs, mat_eq); return mat_eq; } template <typename CmpT> Matrix<CmpT> operator!=(const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape()); Matrix<CmpT> mat_ne(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::ne(mat_lhs, mat_rhs, mat_ne); return mat_ne; } template <typename CmpT> Matrix<CmpT> operator!=(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) { Matrix<CmpT> mat_ne(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::ne(mat_lhs, val_rhs, mat_ne); return mat_ne; } template <typename CmpT> Matrix<CmpT> operator!=(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT> mat_ne(mat_rhs.shape()[0], mat_rhs.shape()[1]); internal::ne(val_lhs, mat_rhs, mat_ne); return mat_ne; } template <typename CmpT> Matrix<CmpT> operator<(const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape()); Matrix<CmpT> mat_lt(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::lt(mat_lhs, mat_rhs, mat_lt); return mat_lt; } template <typename CmpT> Matrix<CmpT> operator<(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) { Matrix<CmpT> mat_lt(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::lt(mat_lhs, val_rhs, mat_lt); return mat_lt; } template <typename CmpT> Matrix<CmpT> operator<(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT> mat_lt(mat_rhs.shape()[0], mat_rhs.shape()[1]); internal::lt(val_lhs, mat_rhs, mat_lt); return mat_lt; } template <typename CmpT> Matrix<CmpT> operator<=(const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape()); Matrix<CmpT> mat_le(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::le(mat_lhs, 
mat_rhs, mat_le); return mat_le; } template <typename CmpT> Matrix<CmpT> operator<=(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) { Matrix<CmpT> mat_le(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::le(mat_lhs, val_rhs, mat_le); return mat_le; } template <typename CmpT> Matrix<CmpT> operator<=(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT> mat_le(mat_rhs.shape()[0], mat_rhs.shape()[1]); internal::le(val_lhs, mat_rhs, mat_le); return mat_le; } template <typename CmpT> Matrix<CmpT> operator>(const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape()); Matrix<CmpT> mat_gt(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::gt(mat_lhs, mat_rhs, mat_gt); return mat_gt; } template <typename CmpT> Matrix<CmpT> operator>(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) { Matrix<CmpT> mat_gt(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::gt(mat_lhs, val_rhs, mat_gt); return mat_gt; } template <typename CmpT> Matrix<CmpT> operator>(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT> mat_gt(mat_rhs.shape()[0], mat_rhs.shape()[1]); internal::gt(val_lhs, mat_rhs, mat_gt); return mat_gt; } template <typename CmpT> Matrix<CmpT> operator>=(const Matrix<CmpT>& mat_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT>::shape_consistence_check_(mat_lhs.shape(), mat_rhs.shape()); Matrix<CmpT> mat_ge(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::ge(mat_lhs, mat_rhs, mat_ge); return mat_ge; } template <typename CmpT> Matrix<CmpT> operator>=(const Matrix<CmpT>& mat_lhs, const CmpT& val_rhs) { Matrix<CmpT> mat_ge(mat_lhs.shape()[0], mat_lhs.shape()[1]); internal::ge(mat_lhs, val_rhs, mat_ge); return mat_ge; } template <typename CmpT> Matrix<CmpT> operator>=(const CmpT& val_lhs, const Matrix<CmpT>& mat_rhs) { Matrix<CmpT> mat_ge(mat_rhs.shape()[0], mat_rhs.shape()[1]); internal::ge(val_lhs, mat_rhs, mat_ge); return mat_ge; } template <typename Tp> bool Matrix<Tp>::equal(const 
Matrix& mat_rhs, std::size_t ulp) { if (shape()[0] != mat_rhs.shape()[0] || shape()[1] != mat_rhs.shape()[1]) { return false; } for (size_type idx_row = 0; idx_row < shape()[0]; ++idx_row) { if (mat_[idx_row].nequal(mat_rhs.mat_[idx_row])) { return false; } } return true; } template <typename Tp> bool Matrix<Tp>::nequal(const Matrix& mat_rhs, std::size_t ulp) { return !equal(mat_rhs, ulp); } template <typename Tp> Tp Matrix<Tp>::max() const { Tp max_element = mat_[0].max(); for (size_type idx_row = 1; idx_row < shape()[0]; ++idx_row) { if (mat_[idx_row].max() > max_element) { max_element = mat_[idx_row].max(); } } return max_element; } // namespace internal namespace internal { template <typename MaxT> void max_of_dim_one(const Matrix<MaxT>& mat, Vector<MaxT>& max_vec) { for (typename Matrix<MaxT>::size_type idx_row = 0; idx_row < mat.shape()[0]; ++idx_row) { max_vec[idx_row] = mat[idx_row].max(); } } template <typename MinT> void min_of_dim_one(const Matrix<MinT>& mat, Vector<MinT>& min_vec) { for (typename Matrix<MinT>::size_type idx_row = 0; idx_row < mat.shape()[0]; ++idx_row) { min_vec[idx_row] = mat[idx_row].min(); } } } // namespace internal template <typename Tp> Vector<Tp> Matrix<Tp>::max(const dimension_type& dim_max) const { if (dim_max == 0) { Vector<Tp> max_vec(shape()[1]); internal::max_of_dim_one(transpose(), max_vec); } else if (dim_max == 1) { Vector<Tp> max_vec(shape()[0]); internal::max_of_dim_one(*this, max_vec); return max_vec; } else { std::string err_msg = "Invalid Dimension: " + std::to_string(dim_max) + " != 0 or 1."; throw MatrixException(err_msg); } } template <typename Tp> Tp Matrix<Tp>::min() const { Tp min_element = mat_[0].min(); for (size_type idx_row = 1; idx_row < shape()[0]; ++idx_row) { if (mat_[idx_row].min() < min_element) { min_element = mat_[idx_row].min(); } } return min_element; } template <typename Tp> Vector<Tp> Matrix<Tp>::min(const dimension_type& dim_min) const { if (dim_min == 0) { Vector<Tp> min_vec(shape()[1]); 
internal::min_of_dim_one(transpose(), min_vec); return min_vec; } else if (dim_min == 1) { Vector<Tp> min_vec(shape()[0]); internal::min_of_dim_one(*this, min_vec); return min_vec; } else { std::string err_msg = "Invalid Dimension: " + std::to_string(dim_min) + " != 0 or 1."; throw MatrixException(err_msg); } } // ======== IO ======== template <typename MatT, typename CharT, typename Traits> std::basic_ostream<CharT, Traits>& operator<<( std::basic_ostream<CharT, Traits>& os, const Matrix<MatT>& mat) { if (mat.shape()[0] == 0) { os << "[[]]"; return os; } if (mat.shape()[0] == 1) { os << "[" << mat.mat_[0] << "]"; return os; } os << "[" << mat.mat_[0] << "\n"; for (typename Matrix<MatT>::size_type idx_row = 1; idx_row < mat.shape()[0] - 1; ++idx_row) { os << " " << mat.mat_[idx_row] << "\n"; } os << " " << mat.mat_[mat.shape()[0] - 1] << "]"; return os; } template <typename MatT, typename CharT, typename Traits> std::basic_istream<CharT, Traits>& operator>>( std::basic_istream<CharT, Traits>& is, Matrix<MatT>& mat) { for (typename Matrix<MatT>::size_type idx_row = 0; idx_row < mat.shape()[0]; ++idx_row) { is >> mat.mat_[idx_row]; } return is; } // ======== Helper Functions ======== template <typename Tp> typename Matrix<Tp>::index_type Matrix<Tp>::to_positive_index_( const size_type& size, const index_type& index) { return index >= 0 ? 
index : size + index; } template <typename Tp> void Matrix<Tp>::exclusive_range_check_(const size_type& size, const index_type& index) { size_type pos_index = to_positive_index_(size, index); if (pos_index >= size) { std::string err_msg = "Out-of-Range: row index " + std::to_string(index) + " is out of range [0, " + std::to_string(size) + ")."; throw MatrixException(err_msg); } } template <typename Tp> void Matrix<Tp>::exclusive_range_check_(const iterator& it_begin, const iterator& it_end, const iterator& it) { if (it < it_begin || it >= it_end) { std::string err_msg = "Out-of-Range: row iterator is out of the range [begin(), end())."; throw MatrixException(err_msg); } } template <typename Tp> void Matrix<Tp>::exclusive_range_check_(const const_iterator& cit_begin, const const_iterator& cit_end, const const_iterator& cit) { if (cit < cit_begin || cit >= cit_end) { std::string err_msg = "Out-of-Range: row const_iterator is out of the range [begin(), end())."; throw MatrixException(err_msg); } } template <typename Tp> void Matrix<Tp>::inclusive_range_check_(const size_type& size, const index_type& index) { size_type pos_index = to_positive_index_(size, index); if (pos_index > size) { std::string err_msg = "Out-of-Range: row index " + std::to_string(index) + " is out of range [0, " + std::to_string(size) + "]."; throw MatrixException(err_msg); } } template <typename Tp> void Matrix<Tp>::inclusive_range_check_(const iterator& it_begin, const iterator& it_end, const iterator& it) { if (it < it_begin || it > it_end) { std::string err_msg = "Out-of-Range: row iterator is out of the range [begin(), end()]."; throw MatrixException(err_msg); } } template <typename Tp> void Matrix<Tp>::inclusive_range_check_(const const_iterator& cit_begin, const const_iterator& cit_end, const const_iterator& cit) { if (cit < cit_begin || cit > cit_end) { std::string err_msg = "Out-of-Range: row const_iterator is out of the range [cbegin(), cend()]."; throw MatrixException(err_msg); } } 
template <typename Tp> void Matrix<Tp>::shape_consistence_check_(const Vector<size_type>& shape_lhs, const Vector<size_type>& shape_rhs) { if (shape_lhs[0] != shape_rhs[0] || shape_lhs[1] != shape_rhs[1]) { std::string err_msg = "Inconsistent shape: [" + std::to_string(shape_lhs[0]) + ", " + std::to_string(shape_lhs[1]) + "] != [" + std::to_string(shape_rhs[0]) + ", " + std::to_string(shape_rhs[1]) + "]."; throw MatrixException(err_msg); } } template <typename Tp> void Matrix<Tp>::index_order_check_(const size_type& size, const index_type& idx_begin, const index_type& idx_end) { if (to_positive_index_(size, idx_begin) > to_positive_index_(size, idx_end)) { std::string err_msg = "Invalid Row Index Order: begin " + std::to_string(to_positive_index_(size, idx_begin)) + " > end " + std::to_string(to_positive_index_(size, idx_end)) + "."; throw MatrixException(err_msg); } } template <typename Tp> typename Matrix<Tp>::index_type Matrix<Tp>::to_positive_index_( const index_type& index) const { return to_positive_index_(mat_.size(), index); } template <typename Tp> void Matrix<Tp>::exclusive_range_check_(const index_type& index) const { exclusive_range_check_(mat_.size(), index); } template <typename Tp> void Matrix<Tp>::exclusive_range_check_(const iterator& it) { exclusive_range_check_(begin(), end(), it); } template <typename Tp> void Matrix<Tp>::exclusive_range_check_(const const_iterator& cit) const { exclusive_range_check_(cbegin(), cend(), cit); } template <typename Tp> void Matrix<Tp>::inclusive_range_check_(const index_type& index) const { inclusive_range_check_(mat_.size(), index); } template <typename Tp> void Matrix<Tp>::inclusive_range_check_(const iterator& it) { inclusive_range_check_(begin(), end(), it); } template <typename Tp> void Matrix<Tp>::inclusive_range_check_(const const_iterator& cit) const { inclusive_range_check_(cbegin(), cend(), cit); } template <typename Tp> void Matrix<Tp>::index_order_check_(const index_type& idx_begin, const index_type& 
idx_end) const {
  index_order_check_(mat_.size(), idx_begin, idx_end);
}

// Validates that it_begin does not come after it_end within this matrix's
// rows, by converting both iterators to indices relative to mat_.begin().
template <typename Tp>
void Matrix<Tp>::iterator_order_check_(const iterator& it_begin,
                                       const iterator& it_end) {
  index_order_check_(mat_.size(),
                     static_cast<index_type>(it_begin - mat_.begin()),
                     static_cast<index_type>(it_end - mat_.begin()));
}

// Const-iterator variant of the ordering check above.
template <typename Tp>
void Matrix<Tp>::const_iterator_order_check_(
    const const_iterator& cit_begin, const const_iterator& cit_end) const {
  index_order_check_(mat_.size(),
                     static_cast<index_type>(cit_begin - mat_.cbegin()),
                     static_cast<index_type>(cit_end - mat_.cbegin()));
}

// ======== End of class Matrix ========

// Exception type thrown by Matrix operations; carries a human-readable
// message retrievable via what().
// NOTE(review): does not derive from std::exception, so `catch
// (const std::exception&)` will not catch it — confirm this is intentional.
class MatrixException {
 public:
  MatrixException() noexcept {};
  MatrixException(const MatrixException& other) noexcept : msg_(other.msg_) {}
  explicit MatrixException(const std::string& message) noexcept
      : msg_(message) {}
  explicit MatrixException(const char* message) noexcept : msg_(message) {}

  // Copy assignment, and direct assignment of a new message.
  MatrixException& operator=(const MatrixException& other) noexcept {
    msg_ = other.msg_;
    return *this;
  }
  MatrixException& operator=(const std::string& msg_copy) noexcept {
    msg_ = msg_copy;
    return *this;
  }
  MatrixException& operator=(const char* msg_copy) noexcept {
    msg_ = msg_copy;
    return *this;
  }

  ~MatrixException() noexcept {};

  // Returns the stored message; the pointer is valid while this object lives.
  const char* what() const noexcept { return msg_.c_str(); }

 protected:
  std::string msg_;
};

}  // namespace tensor

#endif  // TENSOR_MATRIX_H_
GB_unaryop__ainv_fp32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp32_uint64
// op(A') function:  GB_tran__ainv_fp32_uint64

// C type:   float
// A type:   uint64_t
// cast:     float cij = (float) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
// Note the order: the uint64_t entry is first cast to float (GB_CASTING),
// then negated (GB_OP), so the negation is performed in float arithmetic.
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = -((float) Ax [p]) for p in [0, anz), parallelized over
// nthreads. Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop__ainv_fp32_uint64
(
    float *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is provided by GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB_tran__ainv_fp32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ScCleanseInput.c
/*@file Provides functions to cleanse text snippets. Copyright (C) 2020 Marvin Häuser. All rights reserved. SPDX-License-Identifier: BSD-3-Clause */ #include <assert.h> #include <string.h> #include <stddef.h> #include <ScCleanseInput.h> #include <ScStringMisc.h> /* Cleanse the current line from Buffer. @param[in,out] Buffer The buffer to cleanse. It does not need to be terminated. @param[in,out] BufferLength On input, the length of the string in Buffer. On output, the input value minus the distance from the returned pointer to Buffer. @returns A pointer to the new line character in Buffer. */ static char *ScCleanseDropLine( char *Buffer, size_t *BufferLength ) { assert(BufferLength != NULL); assert(Buffer != NULL || *BufferLength == 0); // // When a single line comment is encountered, cleanse all characters till the // end of the line. // for (; *BufferLength > 0; --*BufferLength, ++Buffer) { // // The comment is terminated by a new line. // if (*Buffer == '\n') { break; } *Buffer = ' '; } // // *BufferLength is kept consistent for all operations above. // return Buffer; } /* Cleanse a multi line comment from Buffer. @param[in,out] Buffer The buffer to cleanse. It does need to be terminated. Must not overlap with CommentEnd. @param[in,out] BufferLength On input, the length of the string in Buffer. On output, the input value minus the distance from the returned pointer to Buffer. @param[in] CommentEnd The sequence that ends a multi line comment. Must not overlap with Buffer. @returns A pointer to the character in Buffer after the end of the comment. */ static char *ScCleanseMultiComment( char * restrict Buffer, size_t *BufferLength, const sc_lenghted_string_t *CommentEnd ) { assert(BufferLength != NULL); assert(Buffer != NULL || *BufferLength == 0); assert(CommentEnd != NULL); // // When a multi line comment is encountered, cleanse all characters till the // end of the comment while omiting new line characters. 
// for (; *BufferLength > 0; --*BufferLength, ++Buffer) { // // If the multi line comment terminator is encountered, cleanse it and exit. // const int Prefixed = ScStrnPrefix( Buffer, CommentEnd->String, *BufferLength, CommentEnd->Length ); if (Prefixed == 0) { for (size_t Index = CommentEnd->Length; Index > 0; --Index) { *Buffer = ' '; ++Buffer; } *BufferLength -= CommentEnd->Length; break; } *Buffer = ' '; } // // *BufferLength is kept consistent for all operations above. // return Buffer; } void ScCleanseWhitespacesInLines( char *Buffer, size_t BufferLength, const sc_cleanse_config_t *Config ) { assert(Buffer != NULL || BufferLength == 0); // // Use a temporary buffer for PreceedingNewLine when not present to improve // performance by avoiding NULL checks. // char PreceedingNewLineTmp; char *PreceedingNewLine = &PreceedingNewLineTmp; const char *const BufferTop = Buffer + BufferLength; for (; Buffer < BufferTop; ++Buffer, --BufferLength) { // // Cleanse whitespaces that do not denote a new line to space. // Cleanse line feed to new line to account for Windows-style and similar // line endings. Remove new lines that are only superseeded only by // whitespaces before another new line. // switch(*Buffer) { case '\v': case '\t': { *Buffer = ' '; // // Fall-through to space handling. // } case ' ': { // // Do not alter previous character information for whitespaces, pretend // they do not exist. // break; } default: { // // Check whether the unmatched character is to be treated as a new line. // size_t Index; for (Index = 0; Index < Config->NumNewLineChars; ++Index) { if (*Buffer == Config->NewLineChars[Index]) { break; } } if (Index == Config->NumNewLineChars) { // // A non-whitespace has been encountered, clear previous new line. // PreceedingNewLine = &PreceedingNewLineTmp; break; } // // A character that is to be treated as a new line as been encountered, // fall-through to normalisation and new line handling. 
// } case '\r': { *Buffer = '\n'; // // Fall-through to new line handling. // } case '\n': { // // Remove the previous new line (only serparated by arbitrarily many // whitespaces from the current one) and remember the current one. // *PreceedingNewLine = ' '; PreceedingNewLine = Buffer; break; } } } // // Clear the trailing new line character if existent. // *PreceedingNewLine = ' '; } void ScCleanseLines( char *Buffer, size_t BufferLength, const sc_cleanse_config_t *Config ) { assert(Buffer != NULL || BufferLength == 0); assert(Config != NULL); const char *const BufferTop = Buffer + BufferLength; while (Buffer < BufferTop) { // // Cleanse constructs like comments according to Config. // First, check for prefixes to drop the rest of the line. // size_t DropIndex; for (DropIndex = 0; DropIndex < Config->NumLineDropPrefixes; ++DropIndex) { const int Prefixed = ScStrnPrefix( Buffer, Config->LineDropPrefixes[DropIndex].String, BufferLength, Config->LineDropPrefixes[DropIndex].Length ); if (Prefixed == 0) { Buffer = ScCleanseDropLine(Buffer, &BufferLength); break; } } // // If no prefix to drop the rest of the line has been encountered, check // whether this is a multi line comment. // if (DropIndex == Config->NumLineDropPrefixes) { const int Prefixed = ScStrnPrefix( Buffer, Config->MultiCommentStart.String, BufferLength, Config->MultiCommentStart.Length ); // // The check for length is to account for the unknown configuration not // providing information on multi line comments. // if (Prefixed == 0 && Config->MultiCommentStart.Length > 0) { // // Delete comment start first so constructs like /*/ do not cause early // aborts. // const char *const CommentTop = Buffer + Config->MultiCommentStart.Length; for (; Buffer < CommentTop; ++Buffer, --BufferLength) { *Buffer = ' '; } Buffer = ScCleanseMultiComment( Buffer, &BufferLength, &Config->MultiCommentEnd ); } else { // // No known prefix has been encountered, advance to the next character. 
// ++Buffer; --BufferLength; } } // // All operations must ensure consistency of Buffer and BufferLength. // assert(Buffer + BufferLength == BufferTop); } } void ScCleanseGeneralisees( char *Buffer, size_t BufferLength, const sc_cleanse_config_t *Config ) { assert(Buffer != NULL || BufferLength == 0); assert(Config != NULL); assert(Config->Generalises != NULL || Config->NumGeneralises == 0); // // Check the precondition of generalisees needing to be at least as long as // generalisers. // for (size_t Index = 0; Index < Config->NumGeneralises; ++Index) { const sc_cleanse_generalise_t *Generalise = &Config->Generalises[Index]; for (size_t Index2 = 0; Index2 < Generalise->NumGeneralisees; ++Index2) { assert(Generalise->Generaliser.Length <= Generalise->Generalisees[Index2].Length); } } for (; BufferLength > 0; ++Buffer, --BufferLength) { // // Check for occurences of all generalisees. // for (size_t GenIndex = 0; GenIndex < Config->NumGeneralises; ++GenIndex) { const sc_lenghted_string_t *Generaliser = &Config->Generalises[GenIndex].Generaliser; size_t Index2; for (Index2 = 0; Index2 < Config->Generalises[GenIndex].NumGeneralisees; ++Index2) { const sc_lenghted_string_t *Generalisee = &Config->Generalises[GenIndex].Generalisees[Index2]; const int GenPrefix = ScStrnPrefix( Buffer, Generalisee->String, BufferLength, Generalisee->Length ); if (GenPrefix == 0) { // // Copy the generaliser string over the generalisee occurence. // memcpy(Buffer, Generaliser->String, Generaliser->Length); Buffer += Generaliser->Length; // // Fill up the trailer with spaces. // #pragma omp simd for ( size_t TrailIndex = 0; TrailIndex < Generalisee->Length - Generaliser->Length; ++TrailIndex ) { *Buffer = ' '; ++Buffer; } // // Account for next outside loop iteration's increment. 
// --Buffer; BufferLength -= Generalisee->Length - 1; break; } } if (Index2 < Config->Generalises[GenIndex].NumGeneralisees) { break; } } } } void ScCleanseRemoveSpaces( char *Buffer, size_t *BufferLength ) { assert(Buffer != NULL); assert(BufferLength != NULL); assert(*BufferLength > 0); size_t SourceIndex = 0; size_t TargetIndex = 0; // // Skip the following logic when Buffer starts with a new line. // if (Buffer[0] != '\n') { // // Find the first space character and store it as target to strip it. // Any prior character is a non-whitespace character and is preserved. // for (; TargetIndex < *BufferLength; ++TargetIndex) { if (Buffer[TargetIndex] == ' ') { break; } } // // When the first character is a space character, treat it the same way as // leading new lines outside of this if clause. // if (TargetIndex != 0) { if (TargetIndex == *BufferLength) { return; } // // There is at least one non-whitespace character, the last of which at // TargetIndex - 1. Skip the whitespace and start looking for further such // at TargetIndex + 1 in the loop below. // SourceIndex = TargetIndex + 1; } } // // When TargetIndex is 0, Buffer starts with either a space or a new line. // Advance SourceIndex to the first character that is not one of those to // strip the whitespace prefix. // if (TargetIndex == 0) { for (SourceIndex = 1; SourceIndex < *BufferLength; ++SourceIndex) { if (Buffer[SourceIndex] != ' ' && Buffer[SourceIndex] != '\n') { break; } } } for (; SourceIndex < *BufferLength; ++SourceIndex) { // // Found a new fragment to preserve. // if (Buffer[SourceIndex] != ' ') { size_t SourceIndexEnd; // // Find the end of the fragment. // for (SourceIndexEnd = SourceIndex + 1; SourceIndexEnd < *BufferLength; ++SourceIndexEnd) { if (Buffer[SourceIndexEnd] == ' ') { break; } } // // Strip the whitespaces prior to the fragment. 
// size_t ChunkSize = SourceIndexEnd - SourceIndex; memmove(&Buffer[TargetIndex], &Buffer[SourceIndex], ChunkSize); // // Advance source to the end of the fragment at its origin and target to // right after the copied fragment. // TargetIndex += ChunkSize; SourceIndex = SourceIndexEnd - 1; } } // // TargetIndex is equal to the amount of preserved characters. // *BufferLength = TargetIndex; } void ScCleanseInput( char *Buffer, size_t *BufferLength, const sc_cleanse_config_t *Config ) { assert(Buffer != NULL); assert(BufferLength != NULL); assert(*BufferLength > 0); assert(Config != NULL); ScCleanseLines(Buffer, *BufferLength, Config); ScCleanseGeneralisees(Buffer, *BufferLength, Config); ScCleanseWhitespacesInLines(Buffer, *BufferLength, Config); ScCleanseRemoveSpaces(Buffer, BufferLength); // // After cleansing there may not be empty lines. // for (size_t CharIndex = 1; CharIndex < *BufferLength; ++CharIndex) { assert(Buffer[CharIndex - 1] != '\n' || Buffer[CharIndex] != '\n'); } // // After cleansing there may not be a trailing new line. // if (*BufferLength > 0) { assert(Buffer[*BufferLength - 1] != '\n'); } }
loop_dispatch.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // XFAIL: gcc // GCC doesn't call runtime for static schedule #include "callback.h" #define WORK_SIZE 64 int main() { int i; int wait_s = 0; #pragma omp parallel num_threads(4) { int wait_id = 0; int team_size = omp_get_num_threads(); #pragma omp for schedule(static, WORK_SIZE / 4) for (i = 0; i < WORK_SIZE; i++) {} #pragma omp for schedule(dynamic) for (i = 0; i < WORK_SIZE; i++) { if (wait_id == 0) { // Wait until every thread has at least one iteration assigned OMPT_SIGNAL(wait_s); OMPT_WAIT(wait_s, team_size); wait_id++; } } #pragma omp for schedule(guided) for (i = 0; i < WORK_SIZE; i++) { if (wait_id == 1) { // Wait until every thread has at least one iteration assigned OMPT_SIGNAL(wait_s); OMPT_WAIT(wait_s, 2 * team_size); wait_id++; } } } return 0; } // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dispatch' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[THREAD_ID0:[0-9]+]]: ompt_event_parallel_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]] // Each thread should have at least one ws-loop-chunk-begin event for each // for loop. 
// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID0:[0-9]+]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}} // CHECK: {{^}}[[THREAD_ID1:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID1:[0-9]+]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: 
parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}} // CHECK: {{^}}[[THREAD_ID2:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID2:[0-9]+]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}} // CHECK: {{^}}[[THREAD_ID3:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID3:[0-9]+]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin: // CHECK-SAME: 
parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}}
GxB_Vector_iso.c
//------------------------------------------------------------------------------ // GxB_Vector_iso: report if a vector is iso-valued or not //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GxB_Vector_iso // return iso status of a vector ( bool *iso, // true if the vector is iso-valued const GrB_Vector v // vector to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GxB_Vector_iso (&iso, v)") ; GB_RETURN_IF_NULL (iso) ; GB_RETURN_IF_NULL_OR_FAULTY (v) ; ASSERT (GB_VECTOR_OK (v)) ; //-------------------------------------------------------------------------- // return the iso status of a vector //-------------------------------------------------------------------------- (*iso) = v->iso ; #pragma omp flush return (GrB_SUCCESS) ; }
GB_unop__log2_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log2_fp32_fp32)
// op(A') function:  GB (_unop_tran__log2_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = log2f (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = log2f (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = log2f (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG2 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = log2f (aij) elementwise over anz entries, parallelized with
// a static OpenMP schedule across nthreads threads.
GrB_Info GB (_unop_apply__log2_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log2f (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by GB_unop_transpose.c, which expands using the
// GB_* macros defined above (a deliberate textual-include template pattern).
GrB_Info GB (_unop_tran__log2_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3_pack8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); __m256 _bias0 = bias ? 
_mm256_loadu_ps(bias + p * 8) : _mm256_set1_ps(0.f); out.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr = out; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* kptr = kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { __m256 _sum00 = _mm256_loadu_ps(outptr); __m256 _sum01 = _mm256_setzero_ps(); __m256 _sum10 = _mm256_loadu_ps(outptr + 8); __m256 _sum11 = _mm256_setzero_ps(); __m256 _r000 = _mm256_broadcast_ss(r0 + 0); __m256 _r001 = _mm256_broadcast_ss(r0 + 1); __m256 _r002 = _mm256_broadcast_ss(r0 + 2); __m256 _r003 = _mm256_broadcast_ss(r0 + 3); __m256 _r004 = _mm256_broadcast_ss(r0 + 4); __m256 _r005 = _mm256_broadcast_ss(r0 + 5); __m256 _r006 = _mm256_broadcast_ss(r0 + 6); __m256 _r007 = _mm256_broadcast_ss(r0 + 7); __m256 _k00 = _mm256_loadu_ps(kptr); __m256 _k01 = _mm256_loadu_ps(kptr + 8); __m256 _k02 = _mm256_loadu_ps(kptr + 16); __m256 _k03 = _mm256_loadu_ps(kptr + 24); __m256 _k04 = _mm256_loadu_ps(kptr + 32); __m256 _k05 = _mm256_loadu_ps(kptr + 40); __m256 _k06 = _mm256_loadu_ps(kptr + 48); __m256 _k07 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = _mm256_fmadd_ps(_r000, _k00, _sum00); _sum01 = _mm256_fmadd_ps(_r001, _k01, _sum01); _sum00 = _mm256_fmadd_ps(_r002, _k02, _sum00); _sum01 = _mm256_fmadd_ps(_r003, _k03, _sum01); _sum00 = _mm256_fmadd_ps(_r004, _k04, _sum00); _sum01 = _mm256_fmadd_ps(_r005, _k05, _sum01); _sum00 = _mm256_fmadd_ps(_r006, _k06, _sum00); _sum01 = _mm256_fmadd_ps(_r007, _k07, _sum01); __m256 _r010 = _mm256_broadcast_ss(r0 + 8); __m256 _r011 = _mm256_broadcast_ss(r0 + 9); __m256 _r012 = _mm256_broadcast_ss(r0 + 10); __m256 _r013 = _mm256_broadcast_ss(r0 + 11); __m256 _r014 = _mm256_broadcast_ss(r0 + 12); __m256 _r015 = _mm256_broadcast_ss(r0 + 13); __m256 _r016 = _mm256_broadcast_ss(r0 + 14); __m256 _r017 = _mm256_broadcast_ss(r0 + 15); _sum10 = 
_mm256_fmadd_ps(_r010, _k00, _sum10); _sum11 = _mm256_fmadd_ps(_r011, _k01, _sum11); _sum10 = _mm256_fmadd_ps(_r012, _k02, _sum10); _sum11 = _mm256_fmadd_ps(_r013, _k03, _sum11); _sum10 = _mm256_fmadd_ps(_r014, _k04, _sum10); _sum11 = _mm256_fmadd_ps(_r015, _k05, _sum11); _sum10 = _mm256_fmadd_ps(_r016, _k06, _sum10); _sum11 = _mm256_fmadd_ps(_r017, _k07, _sum11); __m256 _k10 = _mm256_loadu_ps(kptr); __m256 _k11 = _mm256_loadu_ps(kptr + 8); __m256 _k12 = _mm256_loadu_ps(kptr + 16); __m256 _k13 = _mm256_loadu_ps(kptr + 24); __m256 _k14 = _mm256_loadu_ps(kptr + 32); __m256 _k15 = _mm256_loadu_ps(kptr + 40); __m256 _k16 = _mm256_loadu_ps(kptr + 48); __m256 _k17 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = _mm256_fmadd_ps(_r010, _k10, _sum00); _sum01 = _mm256_fmadd_ps(_r011, _k11, _sum01); _sum00 = _mm256_fmadd_ps(_r012, _k12, _sum00); _sum01 = _mm256_fmadd_ps(_r013, _k13, _sum01); _sum00 = _mm256_fmadd_ps(_r014, _k14, _sum00); _sum01 = _mm256_fmadd_ps(_r015, _k15, _sum01); _sum00 = _mm256_fmadd_ps(_r016, _k16, _sum00); _sum01 = _mm256_fmadd_ps(_r017, _k17, _sum01); __m256 _r020 = _mm256_broadcast_ss(r0 + 16); __m256 _r021 = _mm256_broadcast_ss(r0 + 17); __m256 _r022 = _mm256_broadcast_ss(r0 + 18); __m256 _r023 = _mm256_broadcast_ss(r0 + 19); __m256 _r024 = _mm256_broadcast_ss(r0 + 20); __m256 _r025 = _mm256_broadcast_ss(r0 + 21); __m256 _r026 = _mm256_broadcast_ss(r0 + 22); __m256 _r027 = _mm256_broadcast_ss(r0 + 23); _sum10 = _mm256_fmadd_ps(_r020, _k10, _sum10); _sum11 = _mm256_fmadd_ps(_r021, _k11, _sum11); _sum10 = _mm256_fmadd_ps(_r022, _k12, _sum10); _sum11 = _mm256_fmadd_ps(_r023, _k13, _sum11); _sum10 = _mm256_fmadd_ps(_r024, _k14, _sum10); _sum11 = _mm256_fmadd_ps(_r025, _k15, _sum11); _sum10 = _mm256_fmadd_ps(_r026, _k16, _sum10); _sum11 = _mm256_fmadd_ps(_r027, _k17, _sum11); __m256 _k20 = _mm256_loadu_ps(kptr); __m256 _k21 = _mm256_loadu_ps(kptr + 8); __m256 _k22 = _mm256_loadu_ps(kptr + 16); __m256 _k23 = _mm256_loadu_ps(kptr + 24); __m256 _k24 = 
_mm256_loadu_ps(kptr + 32); __m256 _k25 = _mm256_loadu_ps(kptr + 40); __m256 _k26 = _mm256_loadu_ps(kptr + 48); __m256 _k27 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = _mm256_fmadd_ps(_r020, _k20, _sum00); _sum01 = _mm256_fmadd_ps(_r021, _k21, _sum01); _sum00 = _mm256_fmadd_ps(_r022, _k22, _sum00); _sum01 = _mm256_fmadd_ps(_r023, _k23, _sum01); _sum00 = _mm256_fmadd_ps(_r024, _k24, _sum00); _sum01 = _mm256_fmadd_ps(_r025, _k25, _sum01); _sum00 = _mm256_fmadd_ps(_r026, _k26, _sum00); _sum01 = _mm256_fmadd_ps(_r027, _k27, _sum01); __m256 _r030 = _mm256_broadcast_ss(r0 + 24); __m256 _r031 = _mm256_broadcast_ss(r0 + 25); __m256 _r032 = _mm256_broadcast_ss(r0 + 26); __m256 _r033 = _mm256_broadcast_ss(r0 + 27); __m256 _r034 = _mm256_broadcast_ss(r0 + 28); __m256 _r035 = _mm256_broadcast_ss(r0 + 29); __m256 _r036 = _mm256_broadcast_ss(r0 + 30); __m256 _r037 = _mm256_broadcast_ss(r0 + 31); _sum10 = _mm256_fmadd_ps(_r030, _k20, _sum10); _sum11 = _mm256_fmadd_ps(_r031, _k21, _sum11); _sum10 = _mm256_fmadd_ps(_r032, _k22, _sum10); _sum11 = _mm256_fmadd_ps(_r033, _k23, _sum11); _sum10 = _mm256_fmadd_ps(_r034, _k24, _sum10); _sum11 = _mm256_fmadd_ps(_r035, _k25, _sum11); _sum10 = _mm256_fmadd_ps(_r036, _k26, _sum10); _sum11 = _mm256_fmadd_ps(_r037, _k27, _sum11); __m256 _r100 = _mm256_broadcast_ss(r1 + 0); __m256 _r101 = _mm256_broadcast_ss(r1 + 1); __m256 _r102 = _mm256_broadcast_ss(r1 + 2); __m256 _r103 = _mm256_broadcast_ss(r1 + 3); __m256 _r104 = _mm256_broadcast_ss(r1 + 4); __m256 _r105 = _mm256_broadcast_ss(r1 + 5); __m256 _r106 = _mm256_broadcast_ss(r1 + 6); __m256 _r107 = _mm256_broadcast_ss(r1 + 7); __m256 _k30 = _mm256_loadu_ps(kptr); __m256 _k31 = _mm256_loadu_ps(kptr + 8); __m256 _k32 = _mm256_loadu_ps(kptr + 16); __m256 _k33 = _mm256_loadu_ps(kptr + 24); __m256 _k34 = _mm256_loadu_ps(kptr + 32); __m256 _k35 = _mm256_loadu_ps(kptr + 40); __m256 _k36 = _mm256_loadu_ps(kptr + 48); __m256 _k37 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = 
_mm256_fmadd_ps(_r100, _k30, _sum00); _sum01 = _mm256_fmadd_ps(_r101, _k31, _sum01); _sum00 = _mm256_fmadd_ps(_r102, _k32, _sum00); _sum01 = _mm256_fmadd_ps(_r103, _k33, _sum01); _sum00 = _mm256_fmadd_ps(_r104, _k34, _sum00); _sum01 = _mm256_fmadd_ps(_r105, _k35, _sum01); _sum00 = _mm256_fmadd_ps(_r106, _k36, _sum00); _sum01 = _mm256_fmadd_ps(_r107, _k37, _sum01); __m256 _r110 = _mm256_broadcast_ss(r1 + 8); __m256 _r111 = _mm256_broadcast_ss(r1 + 9); __m256 _r112 = _mm256_broadcast_ss(r1 + 10); __m256 _r113 = _mm256_broadcast_ss(r1 + 11); __m256 _r114 = _mm256_broadcast_ss(r1 + 12); __m256 _r115 = _mm256_broadcast_ss(r1 + 13); __m256 _r116 = _mm256_broadcast_ss(r1 + 14); __m256 _r117 = _mm256_broadcast_ss(r1 + 15); _sum10 = _mm256_fmadd_ps(_r110, _k30, _sum10); _sum11 = _mm256_fmadd_ps(_r111, _k31, _sum11); _sum10 = _mm256_fmadd_ps(_r112, _k32, _sum10); _sum11 = _mm256_fmadd_ps(_r113, _k33, _sum11); _sum10 = _mm256_fmadd_ps(_r114, _k34, _sum10); _sum11 = _mm256_fmadd_ps(_r115, _k35, _sum11); _sum10 = _mm256_fmadd_ps(_r116, _k36, _sum10); _sum11 = _mm256_fmadd_ps(_r117, _k37, _sum11); __m256 _k40 = _mm256_loadu_ps(kptr); __m256 _k41 = _mm256_loadu_ps(kptr + 8); __m256 _k42 = _mm256_loadu_ps(kptr + 16); __m256 _k43 = _mm256_loadu_ps(kptr + 24); __m256 _k44 = _mm256_loadu_ps(kptr + 32); __m256 _k45 = _mm256_loadu_ps(kptr + 40); __m256 _k46 = _mm256_loadu_ps(kptr + 48); __m256 _k47 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = _mm256_fmadd_ps(_r110, _k40, _sum00); _sum01 = _mm256_fmadd_ps(_r111, _k41, _sum01); _sum00 = _mm256_fmadd_ps(_r112, _k42, _sum00); _sum01 = _mm256_fmadd_ps(_r113, _k43, _sum01); _sum00 = _mm256_fmadd_ps(_r114, _k44, _sum00); _sum01 = _mm256_fmadd_ps(_r115, _k45, _sum01); _sum00 = _mm256_fmadd_ps(_r116, _k46, _sum00); _sum01 = _mm256_fmadd_ps(_r117, _k47, _sum01); __m256 _r120 = _mm256_broadcast_ss(r1 + 16); __m256 _r121 = _mm256_broadcast_ss(r1 + 17); __m256 _r122 = _mm256_broadcast_ss(r1 + 18); __m256 _r123 = _mm256_broadcast_ss(r1 + 19); 
__m256 _r124 = _mm256_broadcast_ss(r1 + 20); __m256 _r125 = _mm256_broadcast_ss(r1 + 21); __m256 _r126 = _mm256_broadcast_ss(r1 + 22); __m256 _r127 = _mm256_broadcast_ss(r1 + 23); _sum10 = _mm256_fmadd_ps(_r120, _k40, _sum10); _sum11 = _mm256_fmadd_ps(_r121, _k41, _sum11); _sum10 = _mm256_fmadd_ps(_r122, _k42, _sum10); _sum11 = _mm256_fmadd_ps(_r123, _k43, _sum11); _sum10 = _mm256_fmadd_ps(_r124, _k44, _sum10); _sum11 = _mm256_fmadd_ps(_r125, _k45, _sum11); _sum10 = _mm256_fmadd_ps(_r126, _k46, _sum10); _sum11 = _mm256_fmadd_ps(_r127, _k47, _sum11); __m256 _k50 = _mm256_loadu_ps(kptr); __m256 _k51 = _mm256_loadu_ps(kptr + 8); __m256 _k52 = _mm256_loadu_ps(kptr + 16); __m256 _k53 = _mm256_loadu_ps(kptr + 24); __m256 _k54 = _mm256_loadu_ps(kptr + 32); __m256 _k55 = _mm256_loadu_ps(kptr + 40); __m256 _k56 = _mm256_loadu_ps(kptr + 48); __m256 _k57 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = _mm256_fmadd_ps(_r120, _k50, _sum00); _sum01 = _mm256_fmadd_ps(_r121, _k51, _sum01); _sum00 = _mm256_fmadd_ps(_r122, _k52, _sum00); _sum01 = _mm256_fmadd_ps(_r123, _k53, _sum01); _sum00 = _mm256_fmadd_ps(_r124, _k54, _sum00); _sum01 = _mm256_fmadd_ps(_r125, _k55, _sum01); _sum00 = _mm256_fmadd_ps(_r126, _k56, _sum00); _sum01 = _mm256_fmadd_ps(_r127, _k57, _sum01); __m256 _r130 = _mm256_broadcast_ss(r1 + 24); __m256 _r131 = _mm256_broadcast_ss(r1 + 25); __m256 _r132 = _mm256_broadcast_ss(r1 + 26); __m256 _r133 = _mm256_broadcast_ss(r1 + 27); __m256 _r134 = _mm256_broadcast_ss(r1 + 28); __m256 _r135 = _mm256_broadcast_ss(r1 + 29); __m256 _r136 = _mm256_broadcast_ss(r1 + 30); __m256 _r137 = _mm256_broadcast_ss(r1 + 31); _sum10 = _mm256_fmadd_ps(_r130, _k50, _sum10); _sum11 = _mm256_fmadd_ps(_r131, _k51, _sum11); _sum10 = _mm256_fmadd_ps(_r132, _k52, _sum10); _sum11 = _mm256_fmadd_ps(_r133, _k53, _sum11); _sum10 = _mm256_fmadd_ps(_r134, _k54, _sum10); _sum11 = _mm256_fmadd_ps(_r135, _k55, _sum11); _sum10 = _mm256_fmadd_ps(_r136, _k56, _sum10); _sum11 = _mm256_fmadd_ps(_r137, 
_k57, _sum11); __m256 _r200 = _mm256_broadcast_ss(r2 + 0); __m256 _r201 = _mm256_broadcast_ss(r2 + 1); __m256 _r202 = _mm256_broadcast_ss(r2 + 2); __m256 _r203 = _mm256_broadcast_ss(r2 + 3); __m256 _r204 = _mm256_broadcast_ss(r2 + 4); __m256 _r205 = _mm256_broadcast_ss(r2 + 5); __m256 _r206 = _mm256_broadcast_ss(r2 + 6); __m256 _r207 = _mm256_broadcast_ss(r2 + 7); __m256 _k60 = _mm256_loadu_ps(kptr); __m256 _k61 = _mm256_loadu_ps(kptr + 8); __m256 _k62 = _mm256_loadu_ps(kptr + 16); __m256 _k63 = _mm256_loadu_ps(kptr + 24); __m256 _k64 = _mm256_loadu_ps(kptr + 32); __m256 _k65 = _mm256_loadu_ps(kptr + 40); __m256 _k66 = _mm256_loadu_ps(kptr + 48); __m256 _k67 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = _mm256_fmadd_ps(_r200, _k60, _sum00); _sum01 = _mm256_fmadd_ps(_r201, _k61, _sum01); _sum00 = _mm256_fmadd_ps(_r202, _k62, _sum00); _sum01 = _mm256_fmadd_ps(_r203, _k63, _sum01); _sum00 = _mm256_fmadd_ps(_r204, _k64, _sum00); _sum01 = _mm256_fmadd_ps(_r205, _k65, _sum01); _sum00 = _mm256_fmadd_ps(_r206, _k66, _sum00); _sum01 = _mm256_fmadd_ps(_r207, _k67, _sum01); __m256 _r210 = _mm256_broadcast_ss(r2 + 8); __m256 _r211 = _mm256_broadcast_ss(r2 + 9); __m256 _r212 = _mm256_broadcast_ss(r2 + 10); __m256 _r213 = _mm256_broadcast_ss(r2 + 11); __m256 _r214 = _mm256_broadcast_ss(r2 + 12); __m256 _r215 = _mm256_broadcast_ss(r2 + 13); __m256 _r216 = _mm256_broadcast_ss(r2 + 14); __m256 _r217 = _mm256_broadcast_ss(r2 + 15); _sum10 = _mm256_fmadd_ps(_r210, _k60, _sum10); _sum11 = _mm256_fmadd_ps(_r211, _k61, _sum11); _sum10 = _mm256_fmadd_ps(_r212, _k62, _sum10); _sum11 = _mm256_fmadd_ps(_r213, _k63, _sum11); _sum10 = _mm256_fmadd_ps(_r214, _k64, _sum10); _sum11 = _mm256_fmadd_ps(_r215, _k65, _sum11); _sum10 = _mm256_fmadd_ps(_r216, _k66, _sum10); _sum11 = _mm256_fmadd_ps(_r217, _k67, _sum11); __m256 _k70 = _mm256_loadu_ps(kptr); __m256 _k71 = _mm256_loadu_ps(kptr + 8); __m256 _k72 = _mm256_loadu_ps(kptr + 16); __m256 _k73 = _mm256_loadu_ps(kptr + 24); __m256 _k74 = 
_mm256_loadu_ps(kptr + 32); __m256 _k75 = _mm256_loadu_ps(kptr + 40); __m256 _k76 = _mm256_loadu_ps(kptr + 48); __m256 _k77 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum00 = _mm256_fmadd_ps(_r210, _k70, _sum00); _sum01 = _mm256_fmadd_ps(_r211, _k71, _sum01); _sum00 = _mm256_fmadd_ps(_r212, _k72, _sum00); _sum01 = _mm256_fmadd_ps(_r213, _k73, _sum01); _sum00 = _mm256_fmadd_ps(_r214, _k74, _sum00); _sum01 = _mm256_fmadd_ps(_r215, _k75, _sum01); _sum00 = _mm256_fmadd_ps(_r216, _k76, _sum00); _sum01 = _mm256_fmadd_ps(_r217, _k77, _sum01); __m256 _r220 = _mm256_broadcast_ss(r2 + 16); __m256 _r221 = _mm256_broadcast_ss(r2 + 17); __m256 _r222 = _mm256_broadcast_ss(r2 + 18); __m256 _r223 = _mm256_broadcast_ss(r2 + 19); __m256 _r224 = _mm256_broadcast_ss(r2 + 20); __m256 _r225 = _mm256_broadcast_ss(r2 + 21); __m256 _r226 = _mm256_broadcast_ss(r2 + 22); __m256 _r227 = _mm256_broadcast_ss(r2 + 23); _sum10 = _mm256_fmadd_ps(_r220, _k70, _sum10); _sum11 = _mm256_fmadd_ps(_r221, _k71, _sum11); _sum10 = _mm256_fmadd_ps(_r222, _k72, _sum10); _sum11 = _mm256_fmadd_ps(_r223, _k73, _sum11); _sum10 = _mm256_fmadd_ps(_r224, _k74, _sum10); _sum11 = _mm256_fmadd_ps(_r225, _k75, _sum11); _sum10 = _mm256_fmadd_ps(_r226, _k76, _sum10); _sum11 = _mm256_fmadd_ps(_r227, _k77, _sum11); __m256 _k80 = _mm256_loadu_ps(kptr); __m256 _k81 = _mm256_loadu_ps(kptr + 8); __m256 _k82 = _mm256_loadu_ps(kptr + 16); __m256 _k83 = _mm256_loadu_ps(kptr + 24); __m256 _k84 = _mm256_loadu_ps(kptr + 32); __m256 _k85 = _mm256_loadu_ps(kptr + 40); __m256 _k86 = _mm256_loadu_ps(kptr + 48); __m256 _k87 = _mm256_loadu_ps(kptr + 56); _sum00 = _mm256_fmadd_ps(_r220, _k80, _sum00); _sum01 = _mm256_fmadd_ps(_r221, _k81, _sum01); _sum00 = _mm256_fmadd_ps(_r222, _k82, _sum00); _sum01 = _mm256_fmadd_ps(_r223, _k83, _sum01); _sum00 = _mm256_fmadd_ps(_r224, _k84, _sum00); _sum01 = _mm256_fmadd_ps(_r225, _k85, _sum01); _sum00 = _mm256_fmadd_ps(_r226, _k86, _sum00); _sum01 = _mm256_fmadd_ps(_r227, _k87, _sum01); __m256 _r230 = 
_mm256_broadcast_ss(r2 + 24); __m256 _r231 = _mm256_broadcast_ss(r2 + 25); __m256 _r232 = _mm256_broadcast_ss(r2 + 26); __m256 _r233 = _mm256_broadcast_ss(r2 + 27); __m256 _r234 = _mm256_broadcast_ss(r2 + 28); __m256 _r235 = _mm256_broadcast_ss(r2 + 29); __m256 _r236 = _mm256_broadcast_ss(r2 + 30); __m256 _r237 = _mm256_broadcast_ss(r2 + 31); _sum10 = _mm256_fmadd_ps(_r230, _k80, _sum10); _sum11 = _mm256_fmadd_ps(_r231, _k81, _sum11); _sum10 = _mm256_fmadd_ps(_r232, _k82, _sum10); _sum11 = _mm256_fmadd_ps(_r233, _k83, _sum11); _sum10 = _mm256_fmadd_ps(_r234, _k84, _sum10); _sum11 = _mm256_fmadd_ps(_r235, _k85, _sum11); _sum10 = _mm256_fmadd_ps(_r236, _k86, _sum10); _sum11 = _mm256_fmadd_ps(_r237, _k87, _sum11); kptr -= 64 * 8; _sum00 = _mm256_add_ps(_sum00, _sum01); _sum10 = _mm256_add_ps(_sum10, _sum11); _mm256_storeu_ps(outptr, _sum00); _mm256_storeu_ps(outptr + 8, _sum10); r0 += 16; r1 += 16; r2 += 16; outptr += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(outptr); __m256 _sum1 = _mm256_setzero_ps(); __m256 _r000 = _mm256_broadcast_ss(r0 + 0); __m256 _r001 = _mm256_broadcast_ss(r0 + 1); __m256 _r002 = _mm256_broadcast_ss(r0 + 2); __m256 _r003 = _mm256_broadcast_ss(r0 + 3); __m256 _r004 = _mm256_broadcast_ss(r0 + 4); __m256 _r005 = _mm256_broadcast_ss(r0 + 5); __m256 _r006 = _mm256_broadcast_ss(r0 + 6); __m256 _r007 = _mm256_broadcast_ss(r0 + 7); __m256 _k00 = _mm256_loadu_ps(kptr); __m256 _k01 = _mm256_loadu_ps(kptr + 8); __m256 _k02 = _mm256_loadu_ps(kptr + 16); __m256 _k03 = _mm256_loadu_ps(kptr + 24); __m256 _k04 = _mm256_loadu_ps(kptr + 32); __m256 _k05 = _mm256_loadu_ps(kptr + 40); __m256 _k06 = _mm256_loadu_ps(kptr + 48); __m256 _k07 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = _mm256_fmadd_ps(_r000, _k00, _sum0); _sum1 = _mm256_fmadd_ps(_r001, _k01, _sum1); _sum0 = _mm256_fmadd_ps(_r002, _k02, _sum0); _sum1 = _mm256_fmadd_ps(_r003, _k03, _sum1); _sum0 = _mm256_fmadd_ps(_r004, _k04, _sum0); _sum1 = _mm256_fmadd_ps(_r005, _k05, _sum1); 
_sum0 = _mm256_fmadd_ps(_r006, _k06, _sum0); _sum1 = _mm256_fmadd_ps(_r007, _k07, _sum1); __m256 _r010 = _mm256_broadcast_ss(r0 + 8); __m256 _r011 = _mm256_broadcast_ss(r0 + 9); __m256 _r012 = _mm256_broadcast_ss(r0 + 10); __m256 _r013 = _mm256_broadcast_ss(r0 + 11); __m256 _r014 = _mm256_broadcast_ss(r0 + 12); __m256 _r015 = _mm256_broadcast_ss(r0 + 13); __m256 _r016 = _mm256_broadcast_ss(r0 + 14); __m256 _r017 = _mm256_broadcast_ss(r0 + 15); __m256 _k10 = _mm256_loadu_ps(kptr); __m256 _k11 = _mm256_loadu_ps(kptr + 8); __m256 _k12 = _mm256_loadu_ps(kptr + 16); __m256 _k13 = _mm256_loadu_ps(kptr + 24); __m256 _k14 = _mm256_loadu_ps(kptr + 32); __m256 _k15 = _mm256_loadu_ps(kptr + 40); __m256 _k16 = _mm256_loadu_ps(kptr + 48); __m256 _k17 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = _mm256_fmadd_ps(_r010, _k10, _sum0); _sum1 = _mm256_fmadd_ps(_r011, _k11, _sum1); _sum0 = _mm256_fmadd_ps(_r012, _k12, _sum0); _sum1 = _mm256_fmadd_ps(_r013, _k13, _sum1); _sum0 = _mm256_fmadd_ps(_r014, _k14, _sum0); _sum1 = _mm256_fmadd_ps(_r015, _k15, _sum1); _sum0 = _mm256_fmadd_ps(_r016, _k16, _sum0); _sum1 = _mm256_fmadd_ps(_r017, _k17, _sum1); __m256 _r020 = _mm256_broadcast_ss(r0 + 16); __m256 _r021 = _mm256_broadcast_ss(r0 + 17); __m256 _r022 = _mm256_broadcast_ss(r0 + 18); __m256 _r023 = _mm256_broadcast_ss(r0 + 19); __m256 _r024 = _mm256_broadcast_ss(r0 + 20); __m256 _r025 = _mm256_broadcast_ss(r0 + 21); __m256 _r026 = _mm256_broadcast_ss(r0 + 22); __m256 _r027 = _mm256_broadcast_ss(r0 + 23); __m256 _k20 = _mm256_loadu_ps(kptr); __m256 _k21 = _mm256_loadu_ps(kptr + 8); __m256 _k22 = _mm256_loadu_ps(kptr + 16); __m256 _k23 = _mm256_loadu_ps(kptr + 24); __m256 _k24 = _mm256_loadu_ps(kptr + 32); __m256 _k25 = _mm256_loadu_ps(kptr + 40); __m256 _k26 = _mm256_loadu_ps(kptr + 48); __m256 _k27 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = _mm256_fmadd_ps(_r020, _k20, _sum0); _sum1 = _mm256_fmadd_ps(_r021, _k21, _sum1); _sum0 = _mm256_fmadd_ps(_r022, _k22, _sum0); _sum1 = 
_mm256_fmadd_ps(_r023, _k23, _sum1); _sum0 = _mm256_fmadd_ps(_r024, _k24, _sum0); _sum1 = _mm256_fmadd_ps(_r025, _k25, _sum1); _sum0 = _mm256_fmadd_ps(_r026, _k26, _sum0); _sum1 = _mm256_fmadd_ps(_r027, _k27, _sum1); __m256 _r100 = _mm256_broadcast_ss(r1 + 0); __m256 _r101 = _mm256_broadcast_ss(r1 + 1); __m256 _r102 = _mm256_broadcast_ss(r1 + 2); __m256 _r103 = _mm256_broadcast_ss(r1 + 3); __m256 _r104 = _mm256_broadcast_ss(r1 + 4); __m256 _r105 = _mm256_broadcast_ss(r1 + 5); __m256 _r106 = _mm256_broadcast_ss(r1 + 6); __m256 _r107 = _mm256_broadcast_ss(r1 + 7); __m256 _k30 = _mm256_loadu_ps(kptr); __m256 _k31 = _mm256_loadu_ps(kptr + 8); __m256 _k32 = _mm256_loadu_ps(kptr + 16); __m256 _k33 = _mm256_loadu_ps(kptr + 24); __m256 _k34 = _mm256_loadu_ps(kptr + 32); __m256 _k35 = _mm256_loadu_ps(kptr + 40); __m256 _k36 = _mm256_loadu_ps(kptr + 48); __m256 _k37 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = _mm256_fmadd_ps(_r100, _k30, _sum0); _sum1 = _mm256_fmadd_ps(_r101, _k31, _sum1); _sum0 = _mm256_fmadd_ps(_r102, _k32, _sum0); _sum1 = _mm256_fmadd_ps(_r103, _k33, _sum1); _sum0 = _mm256_fmadd_ps(_r104, _k34, _sum0); _sum1 = _mm256_fmadd_ps(_r105, _k35, _sum1); _sum0 = _mm256_fmadd_ps(_r106, _k36, _sum0); _sum1 = _mm256_fmadd_ps(_r107, _k37, _sum1); __m256 _r110 = _mm256_broadcast_ss(r1 + 8); __m256 _r111 = _mm256_broadcast_ss(r1 + 9); __m256 _r112 = _mm256_broadcast_ss(r1 + 10); __m256 _r113 = _mm256_broadcast_ss(r1 + 11); __m256 _r114 = _mm256_broadcast_ss(r1 + 12); __m256 _r115 = _mm256_broadcast_ss(r1 + 13); __m256 _r116 = _mm256_broadcast_ss(r1 + 14); __m256 _r117 = _mm256_broadcast_ss(r1 + 15); __m256 _k40 = _mm256_loadu_ps(kptr); __m256 _k41 = _mm256_loadu_ps(kptr + 8); __m256 _k42 = _mm256_loadu_ps(kptr + 16); __m256 _k43 = _mm256_loadu_ps(kptr + 24); __m256 _k44 = _mm256_loadu_ps(kptr + 32); __m256 _k45 = _mm256_loadu_ps(kptr + 40); __m256 _k46 = _mm256_loadu_ps(kptr + 48); __m256 _k47 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = 
_mm256_fmadd_ps(_r110, _k40, _sum0); _sum1 = _mm256_fmadd_ps(_r111, _k41, _sum1); _sum0 = _mm256_fmadd_ps(_r112, _k42, _sum0); _sum1 = _mm256_fmadd_ps(_r113, _k43, _sum1); _sum0 = _mm256_fmadd_ps(_r114, _k44, _sum0); _sum1 = _mm256_fmadd_ps(_r115, _k45, _sum1); _sum0 = _mm256_fmadd_ps(_r116, _k46, _sum0); _sum1 = _mm256_fmadd_ps(_r117, _k47, _sum1); __m256 _r120 = _mm256_broadcast_ss(r1 + 16); __m256 _r121 = _mm256_broadcast_ss(r1 + 17); __m256 _r122 = _mm256_broadcast_ss(r1 + 18); __m256 _r123 = _mm256_broadcast_ss(r1 + 19); __m256 _r124 = _mm256_broadcast_ss(r1 + 20); __m256 _r125 = _mm256_broadcast_ss(r1 + 21); __m256 _r126 = _mm256_broadcast_ss(r1 + 22); __m256 _r127 = _mm256_broadcast_ss(r1 + 23); __m256 _k50 = _mm256_loadu_ps(kptr); __m256 _k51 = _mm256_loadu_ps(kptr + 8); __m256 _k52 = _mm256_loadu_ps(kptr + 16); __m256 _k53 = _mm256_loadu_ps(kptr + 24); __m256 _k54 = _mm256_loadu_ps(kptr + 32); __m256 _k55 = _mm256_loadu_ps(kptr + 40); __m256 _k56 = _mm256_loadu_ps(kptr + 48); __m256 _k57 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = _mm256_fmadd_ps(_r120, _k50, _sum0); _sum1 = _mm256_fmadd_ps(_r121, _k51, _sum1); _sum0 = _mm256_fmadd_ps(_r122, _k52, _sum0); _sum1 = _mm256_fmadd_ps(_r123, _k53, _sum1); _sum0 = _mm256_fmadd_ps(_r124, _k54, _sum0); _sum1 = _mm256_fmadd_ps(_r125, _k55, _sum1); _sum0 = _mm256_fmadd_ps(_r126, _k56, _sum0); _sum1 = _mm256_fmadd_ps(_r127, _k57, _sum1); __m256 _r200 = _mm256_broadcast_ss(r2 + 0); __m256 _r201 = _mm256_broadcast_ss(r2 + 1); __m256 _r202 = _mm256_broadcast_ss(r2 + 2); __m256 _r203 = _mm256_broadcast_ss(r2 + 3); __m256 _r204 = _mm256_broadcast_ss(r2 + 4); __m256 _r205 = _mm256_broadcast_ss(r2 + 5); __m256 _r206 = _mm256_broadcast_ss(r2 + 6); __m256 _r207 = _mm256_broadcast_ss(r2 + 7); __m256 _k60 = _mm256_loadu_ps(kptr); __m256 _k61 = _mm256_loadu_ps(kptr + 8); __m256 _k62 = _mm256_loadu_ps(kptr + 16); __m256 _k63 = _mm256_loadu_ps(kptr + 24); __m256 _k64 = _mm256_loadu_ps(kptr + 32); __m256 _k65 = 
_mm256_loadu_ps(kptr + 40); __m256 _k66 = _mm256_loadu_ps(kptr + 48); __m256 _k67 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = _mm256_fmadd_ps(_r200, _k60, _sum0); _sum1 = _mm256_fmadd_ps(_r201, _k61, _sum1); _sum0 = _mm256_fmadd_ps(_r202, _k62, _sum0); _sum1 = _mm256_fmadd_ps(_r203, _k63, _sum1); _sum0 = _mm256_fmadd_ps(_r204, _k64, _sum0); _sum1 = _mm256_fmadd_ps(_r205, _k65, _sum1); _sum0 = _mm256_fmadd_ps(_r206, _k66, _sum0); _sum1 = _mm256_fmadd_ps(_r207, _k67, _sum1); __m256 _r210 = _mm256_broadcast_ss(r2 + 8); __m256 _r211 = _mm256_broadcast_ss(r2 + 9); __m256 _r212 = _mm256_broadcast_ss(r2 + 10); __m256 _r213 = _mm256_broadcast_ss(r2 + 11); __m256 _r214 = _mm256_broadcast_ss(r2 + 12); __m256 _r215 = _mm256_broadcast_ss(r2 + 13); __m256 _r216 = _mm256_broadcast_ss(r2 + 14); __m256 _r217 = _mm256_broadcast_ss(r2 + 15); __m256 _k70 = _mm256_loadu_ps(kptr); __m256 _k71 = _mm256_loadu_ps(kptr + 8); __m256 _k72 = _mm256_loadu_ps(kptr + 16); __m256 _k73 = _mm256_loadu_ps(kptr + 24); __m256 _k74 = _mm256_loadu_ps(kptr + 32); __m256 _k75 = _mm256_loadu_ps(kptr + 40); __m256 _k76 = _mm256_loadu_ps(kptr + 48); __m256 _k77 = _mm256_loadu_ps(kptr + 56); kptr += 64; _sum0 = _mm256_fmadd_ps(_r210, _k70, _sum0); _sum1 = _mm256_fmadd_ps(_r211, _k71, _sum1); _sum0 = _mm256_fmadd_ps(_r212, _k72, _sum0); _sum1 = _mm256_fmadd_ps(_r213, _k73, _sum1); _sum0 = _mm256_fmadd_ps(_r214, _k74, _sum0); _sum1 = _mm256_fmadd_ps(_r215, _k75, _sum1); _sum0 = _mm256_fmadd_ps(_r216, _k76, _sum0); _sum1 = _mm256_fmadd_ps(_r217, _k77, _sum1); __m256 _r220 = _mm256_broadcast_ss(r2 + 16); __m256 _r221 = _mm256_broadcast_ss(r2 + 17); __m256 _r222 = _mm256_broadcast_ss(r2 + 18); __m256 _r223 = _mm256_broadcast_ss(r2 + 19); __m256 _r224 = _mm256_broadcast_ss(r2 + 20); __m256 _r225 = _mm256_broadcast_ss(r2 + 21); __m256 _r226 = _mm256_broadcast_ss(r2 + 22); __m256 _r227 = _mm256_broadcast_ss(r2 + 23); __m256 _k80 = _mm256_loadu_ps(kptr); __m256 _k81 = _mm256_loadu_ps(kptr + 8); __m256 _k82 = 
_mm256_loadu_ps(kptr + 16);
__m256 _k83 = _mm256_loadu_ps(kptr + 24);
__m256 _k84 = _mm256_loadu_ps(kptr + 32);
__m256 _k85 = _mm256_loadu_ps(kptr + 40);
__m256 _k86 = _mm256_loadu_ps(kptr + 48);
__m256 _k87 = _mm256_loadu_ps(kptr + 56);

_sum0 = _mm256_fmadd_ps(_r220, _k80, _sum0);
_sum1 = _mm256_fmadd_ps(_r221, _k81, _sum1);
_sum0 = _mm256_fmadd_ps(_r222, _k82, _sum0);
_sum1 = _mm256_fmadd_ps(_r223, _k83, _sum1);
_sum0 = _mm256_fmadd_ps(_r224, _k84, _sum0);
_sum1 = _mm256_fmadd_ps(_r225, _k85, _sum1);
_sum0 = _mm256_fmadd_ps(_r226, _k86, _sum0);
_sum1 = _mm256_fmadd_ps(_r227, _k87, _sum1);

// rewind the kernel pointer to the start of this 8x64-float kernel tile
// so the next output position reuses the same weights
kptr -= 64 * 8;

// fold the two interleaved accumulators into one result vector
_sum0 = _mm256_add_ps(_sum0, _sum1);

_mm256_storeu_ps(outptr, _sum0);

r0 += 8;
r1 += 8;
r2 += 8;
outptr += 8;
}

r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}

// Transform all 3x3 convolution kernels into the 8x8 Winograd domain
// ("winograd63" per the comment below, i.e. an 8-point transform producing
// 64 coefficients per kernel) and interleave them into the pack8 layout
// consumed by the AVX GEMM stage.
//
// kernel          : raw weights, laid out as outch x inch x 3x3 floats
// kernel_tm_pack8 : output, created here with layout 8b-8a-inch/8a-64-outch/8b
// inch, outch     : input / output channel counts
//
// NOTE(review): the interleave loops below advance in steps of 8 and skip
// any remainder (q + 7 < outch, p + 7 < inch), so channel counts that are
// not multiples of 8 leave a tail untransposed — presumably the caller
// guarantees inch % 8 == 0 and outch % 8 == 0; confirm at call sites.
static void conv3x3s1_winograd64_transform_kernel_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // 8x3 kernel-side transform matrix; applied once per kernel row ("h")
    // and once per column ("v") below to produce the 8x8 transformed tile.
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // kernel0 points at the 3x3 weight block for (out p, in q)
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            // row pass: tmp[i][c] = ktm[i] dot (column c of the 3x3 kernel)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v
            // column pass: expand each 3-vector tmp[j] to 8 coefficients,
            // writing the 8x8 result in transposed (column-major) order
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8b-8a-inch/8a-64-outch/8b;
    // (size_t)4u * 64: each pack8 element is 64 floats of 4 bytes
    kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)4u * 64, 64);

    int q = 0;
    for (; q + 7 < outch; q += 8)
    {
        // eight consecutive output-channel planes feed one pack8 group
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        const Mat k4 = kernel_tm.channel(q + 4);
        const Mat k5 = kernel_tm.channel(q + 5);
        const Mat k6 = kernel_tm.channel(q + 6);
        const Mat k7 = kernel_tm.channel(q + 7);

        Mat g0 = kernel_tm_pack8.channel(q / 8);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                // kXY = transformed row for output channel q+X, input channel p+Y
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);
                const float* k04 = k0.row(p + 4);
                const float* k05 = k0.row(p + 5);
                const float* k06 = k0.row(p + 6);
                const float* k07 = k0.row(p + 7);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);
                const float* k14 = k1.row(p + 4);
                const float* k15 = k1.row(p + 5);
                const float* k16 = k1.row(p + 6);
                const float* k17 = k1.row(p + 7);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);
                const float* k24 = k2.row(p + 4);
                const float* k25 = k2.row(p + 5);
                const float* k26 = k2.row(p + 6);
                const float* k27 = k2.row(p + 7);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);
                const float* k34 = k3.row(p + 4);
                const float* k35 = k3.row(p + 5);
                const float* k36 = k3.row(p + 6);
                const float* k37 = k3.row(p + 7);

                const float* k40 = k4.row(p);
                const float* k41 = k4.row(p + 1);
                const float* k42 = k4.row(p + 2);
                const float* k43 = k4.row(p + 3);
                const float* k44 = k4.row(p + 4);
                const float* k45 = k4.row(p + 5);
                const float* k46 = k4.row(p + 6);
                const float* k47 = k4.row(p + 7);

                const float* k50 = k5.row(p);
                const float* k51 = k5.row(p + 1);
                const float* k52 = k5.row(p + 2);
                const float* k53 = k5.row(p + 3);
                const float* k54 = k5.row(p + 4);
                const float* k55 = k5.row(p + 5);
                const float* k56 = k5.row(p + 6);
                const float* k57 = k5.row(p + 7);

                const float* k60 = k6.row(p);
                const float* k61 = k6.row(p + 1);
                const float* k62 = k6.row(p + 2);
                const float* k63 = k6.row(p + 3);
                const float* k64 = k6.row(p + 4);
                const float* k65 = k6.row(p + 5);
                const float* k66 = k6.row(p + 6);
                const float* k67 = k6.row(p + 7);

                const float* k70 = k7.row(p);
                const float* k71 = k7.row(p + 1);
                const float* k72 = k7.row(p + 2);
                const float* k73 = k7.row(p + 3);
                const float* k74 = k7.row(p + 4);
                const float* k75 = k7.row(p + 5);
                const float* k76 = k7.row(p + 6);
                const float* k77 = k7.row(p + 7);

                // scatter coefficient k of each (in, out) pair into the
                // 8b-8a interleave: g00[8*in_offset + out_offset]
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];
                g00[8] = k01[k];
                g00[9] = k11[k];
                g00[10] = k21[k];
                g00[11] = k31[k];
                g00[12] = k41[k];
                g00[13] = k51[k];
                g00[14] = k61[k];
                g00[15] = k71[k];
                g00[16] = k02[k];
                g00[17] = k12[k];
                g00[18] = k22[k];
                g00[19] = k32[k];
                g00[20] = k42[k];
                g00[21] = k52[k];
                g00[22] = k62[k];
                g00[23] = k72[k];
                g00[24] = k03[k];
                g00[25] = k13[k];
                g00[26] = k23[k];
                g00[27] = k33[k];
                g00[28] = k43[k];
                g00[29] = k53[k];
                g00[30] = k63[k];
                g00[31] = k73[k];
                g00[32] = k04[k];
                g00[33] = k14[k];
                g00[34] = k24[k];
                g00[35] = k34[k];
                g00[36] = k44[k];
                g00[37] = k54[k];
                g00[38] = k64[k];
                g00[39] = k74[k];
                g00[40] = k05[k];
                g00[41] = k15[k];
                g00[42] = k25[k];
                g00[43] = k35[k];
                g00[44] = k45[k];
                g00[45] = k55[k];
                g00[46] = k65[k];
                g00[47] = k75[k];
                g00[48] = k06[k];
                g00[49] = k16[k];
                g00[50] = k26[k];
                g00[51] = k36[k];
                g00[52] = k46[k];
                g00[53] = k56[k];
                g00[54] = k66[k];
                g00[55] = k76[k];
                g00[56] = k07[k];
                g00[57] = k17[k];
                g00[58] = k27[k];
                g00[59] = k37[k];
                g00[60] = k47[k];
                g00[61] = k57[k];
                g00[62] = k67[k];
                g00[63] = k77[k];

                g00 += 64;
            }
        }
    }
}
static
void conv3x3s1_winograd64_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; 
q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _tmp0m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r00, _r06), _mm256_sub_ps(_r04, _r02), 5.25f); __m256 _tmp7m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r07, _r01), _mm256_sub_ps(_r03, _r05), 5.25f); _mm256_storeu_ps(tmp[0][m], _tmp0m); _mm256_storeu_ps(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; __m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_r02, _r06), _r04, 4.25f); __m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); __m256 _tmp1m = _mm256_add_ps(_tmp12a, _tmp12b); __m256 _tmp2m = _mm256_sub_ps(_tmp12a, _tmp12b); _mm256_storeu_ps(tmp[1][m], _tmp1m); _mm256_storeu_ps(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; __m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_r06, _r02, 0.25f), _r04, 1.25f); __m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(0.5f)), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); __m256 _tmp3m = _mm256_add_ps(_tmp34a, _tmp34b); __m256 _tmp4m = _mm256_sub_ps(_tmp34a, _tmp34b); _mm256_storeu_ps(tmp[3][m], _tmp3m); _mm256_storeu_ps(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // 
tmp[4][m] = tmp34a - tmp34b; __m256 _tmp56a = _mm256_fmadd_1_ps(_r06, _mm256_fmrsub_1_ps(_r02, _r04, 1.25f), 4.f); __m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(2.f)), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); __m256 _tmp5m = _mm256_add_ps(_tmp56a, _tmp56b); __m256 _tmp6m = _mm256_sub_ps(_tmp56a, _tmp56b); _mm256_storeu_ps(tmp[5][m], _tmp5m); _mm256_storeu_ps(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 8; float* r0_tm_1 = r0_tm_0 + tiles * 8; float* r0_tm_2 = r0_tm_0 + tiles * 16; float* r0_tm_3 = r0_tm_0 + tiles * 24; float* r0_tm_4 = r0_tm_0 + tiles * 32; float* r0_tm_5 = r0_tm_0 + tiles * 40; float* r0_tm_6 = r0_tm_0 + tiles * 48; float* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { __m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]); __m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]); __m256 _tmp02 = _mm256_loadu_ps(tmp[m][2]); __m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]); __m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]); __m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]); __m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]); __m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]); __m256 _r0tm0 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp00, _tmp06), _mm256_sub_ps(_tmp04, _tmp02), 5.25f); __m256 _r0tm7 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp07, _tmp01), _mm256_sub_ps(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; __m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp02, _tmp06), _tmp04, 4.25f); __m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); __m256 _r0tm1 = _mm256_add_ps(_tmp12a, _tmp12b); __m256 _r0tm2 = 
_mm256_sub_ps(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; __m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); __m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(0.5f)), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); __m256 _r0tm3 = _mm256_add_ps(_tmp34a, _tmp34b); __m256 _r0tm4 = _mm256_sub_ps(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; __m256 _tmp56a = _mm256_fmadd_1_ps(_tmp06, _mm256_fmrsub_1_ps(_tmp02, _tmp04, 1.25f), 4.f); __m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(2.f)), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); __m256 _r0tm5 = _mm256_add_ps(_tmp56a, _tmp56b); __m256 _r0tm6 = _mm256_sub_ps(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; _mm256_storeu_ps(r0_tm_0, _r0tm0); _mm256_storeu_ps(r0_tm_1, _r0tm1); _mm256_storeu_ps(r0_tm_2, _r0tm2); _mm256_storeu_ps(r0_tm_3, _r0tm3); _mm256_storeu_ps(r0_tm_4, _r0tm4); _mm256_storeu_ps(r0_tm_5, _r0tm5); _mm256_storeu_ps(r0_tm_6, _r0tm6); _mm256_storeu_ps(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) 
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); __m256 _r4 = _mm256_loadu_ps(r0 + 32); __m256 _r5 = _mm256_loadu_ps(r0 + 40); __m256 _r6 = _mm256_loadu_ps(r0 + 48); __m256 _r7 = _mm256_loadu_ps(r0 + 56); __m256 _r8 = _mm256_loadu_ps(r0 + 64); __m256 _r9 = _mm256_loadu_ps(r0 + 72); __m256 _r10 = _mm256_loadu_ps(r0 + 80); __m256 _r11 = _mm256_loadu_ps(r0 + 88); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); _mm256_storeu_ps(tm2p + 32, _r4); _mm256_storeu_ps(tm2p + 40, _r5); _mm256_storeu_ps(tm2p + 48, _r6); _mm256_storeu_ps(tm2p + 56, _r7); _mm256_storeu_ps(tm2p + 64, _r8); _mm256_storeu_ps(tm2p + 72, _r9); _mm256_storeu_ps(tm2p + 80, _r10); _mm256_storeu_ps(tm2p + 88, _r11); tm2p += 96; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); 
_mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); __m256 _r4 = _mm256_loadu_ps(r0 + 32); __m256 _r5 = _mm256_loadu_ps(r0 + 40); _mm256_storeu_ps(tm2p + 32, _r4); _mm256_storeu_ps(tm2p + 40, _r5); __m256 _r6 = _mm256_loadu_ps(r0 + 48); __m256 _r7 = _mm256_loadu_ps(r0 + 56); _mm256_storeu_ps(tm2p + 48, _r6); _mm256_storeu_ps(tm2p + 56, _r7); tm2p += 64; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); tm2p += 32; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); tm2p += 16; r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); _mm256_storeu_ps(tm2p, _r0); tm2p += 8; r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { 
float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); __m256 _sum4 = _mm256_set1_ps(0.f); __m256 _sum5 = _mm256_set1_ps(0.f); __m256 _sum6 = _mm256_set1_ps(0.f); __m256 _sum7 = _mm256_set1_ps(0.f); __m256 _sum8 = _mm256_set1_ps(0.f); __m256 _sum9 = _mm256_set1_ps(0.f); __m256 _sum10 = _mm256_set1_ps(0.f); __m256 _sum11 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); __m256 _r04 = _mm256_broadcast_ss(r0 + 32); __m256 _r05 = _mm256_broadcast_ss(r0 + 40); __m256 _r06 = _mm256_broadcast_ss(r0 + 48); __m256 _r07 = _mm256_broadcast_ss(r0 + 56); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); __m256 _r08 = _mm256_broadcast_ss(r0 + 64); __m256 _r09 = _mm256_broadcast_ss(r0 + 72); __m256 _r010 = _mm256_broadcast_ss(r0 + 80); __m256 _r011 = _mm256_broadcast_ss(r0 + 88); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = 
_mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 33); _r05 = _mm256_broadcast_ss(r0 + 41); _r06 = _mm256_broadcast_ss(r0 + 49); _r07 = _mm256_broadcast_ss(r0 + 57); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 65); _r09 = _mm256_broadcast_ss(r0 + 73); _r010 = _mm256_broadcast_ss(r0 + 81); _r011 = _mm256_broadcast_ss(r0 + 89); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 34); _r05 = _mm256_broadcast_ss(r0 + 42); _r06 = _mm256_broadcast_ss(r0 + 50); _r07 = _mm256_broadcast_ss(r0 + 58); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 66); _r09 = _mm256_broadcast_ss(r0 + 74); _r010 = _mm256_broadcast_ss(r0 + 82); _r011 = _mm256_broadcast_ss(r0 + 90); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = 
_mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 35); _r05 = _mm256_broadcast_ss(r0 + 43); _r06 = _mm256_broadcast_ss(r0 + 51); _r07 = _mm256_broadcast_ss(r0 + 59); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 67); _r09 = _mm256_broadcast_ss(r0 + 75); _r010 = _mm256_broadcast_ss(r0 + 83); _r011 = _mm256_broadcast_ss(r0 + 91); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 36); _r05 = _mm256_broadcast_ss(r0 + 44); _r06 = _mm256_broadcast_ss(r0 + 52); _r07 = _mm256_broadcast_ss(r0 + 60); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 68); _r09 = _mm256_broadcast_ss(r0 + 76); _r010 = _mm256_broadcast_ss(r0 + 84); _r011 = _mm256_broadcast_ss(r0 + 92); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 40); _r00 = 
_mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 37); _r05 = _mm256_broadcast_ss(r0 + 45); _r06 = _mm256_broadcast_ss(r0 + 53); _r07 = _mm256_broadcast_ss(r0 + 61); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 69); _r09 = _mm256_broadcast_ss(r0 + 77); _r010 = _mm256_broadcast_ss(r0 + 85); _r011 = _mm256_broadcast_ss(r0 + 93); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 38); _r05 = _mm256_broadcast_ss(r0 + 46); _r06 = _mm256_broadcast_ss(r0 + 54); _r07 = _mm256_broadcast_ss(r0 + 62); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 70); _r09 = _mm256_broadcast_ss(r0 + 78); _r010 = _mm256_broadcast_ss(r0 + 86); _r011 = _mm256_broadcast_ss(r0 + 94); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); _k01 = 
_mm256_loadu_ps(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 39); _r05 = _mm256_broadcast_ss(r0 + 47); _r06 = _mm256_broadcast_ss(r0 + 55); _r07 = _mm256_broadcast_ss(r0 + 63); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 71); _r09 = _mm256_broadcast_ss(r0 + 79); _r010 = _mm256_broadcast_ss(r0 + 87); _r011 = _mm256_broadcast_ss(r0 + 95); _sum8 = _mm256_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_fmadd_ps(_k01, _r011, _sum11); k01 += 64; r0 += 96; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); _mm256_storeu_ps(output0_tm + 32, _sum4); _mm256_storeu_ps(output0_tm + 40, _sum5); _mm256_storeu_ps(output0_tm + 48, _sum6); _mm256_storeu_ps(output0_tm + 56, _sum7); _mm256_storeu_ps(output0_tm + 64, _sum8); _mm256_storeu_ps(output0_tm + 72, _sum9); _mm256_storeu_ps(output0_tm + 80, _sum10); _mm256_storeu_ps(output0_tm + 88, _sum11); output0_tm += 96; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); __m256 _sum4 = _mm256_set1_ps(0.f); __m256 _sum5 = _mm256_set1_ps(0.f); __m256 _sum6 = _mm256_set1_ps(0.f); __m256 _sum7 = _mm256_set1_ps(0.f); for (; 
nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); __m256 _r04 = _mm256_broadcast_ss(r0 + 32); __m256 _r05 = _mm256_broadcast_ss(r0 + 40); __m256 _r06 = _mm256_broadcast_ss(r0 + 48); __m256 _r07 = _mm256_broadcast_ss(r0 + 56); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 33); _r05 = _mm256_broadcast_ss(r0 + 41); _r06 = _mm256_broadcast_ss(r0 + 49); _r07 = _mm256_broadcast_ss(r0 + 57); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 34); _r05 = _mm256_broadcast_ss(r0 + 42); _r06 = _mm256_broadcast_ss(r0 + 50); _r07 = _mm256_broadcast_ss(r0 + 58); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = 
_mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 35); _r05 = _mm256_broadcast_ss(r0 + 43); _r06 = _mm256_broadcast_ss(r0 + 51); _r07 = _mm256_broadcast_ss(r0 + 59); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 36); _r05 = _mm256_broadcast_ss(r0 + 44); _r06 = _mm256_broadcast_ss(r0 + 52); _r07 = _mm256_broadcast_ss(r0 + 60); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 40); _r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 37); _r05 = _mm256_broadcast_ss(r0 + 45); _r06 = _mm256_broadcast_ss(r0 + 53); _r07 = _mm256_broadcast_ss(r0 + 61); _sum4 = 
_mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 38); _r05 = _mm256_broadcast_ss(r0 + 46); _r06 = _mm256_broadcast_ss(r0 + 54); _r07 = _mm256_broadcast_ss(r0 + 62); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 39); _r05 = _mm256_broadcast_ss(r0 + 47); _r06 = _mm256_broadcast_ss(r0 + 55); _r07 = _mm256_broadcast_ss(r0 + 63); _sum4 = _mm256_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_fmadd_ps(_k01, _r07, _sum7); k01 += 64; r0 += 64; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); _mm256_storeu_ps(output0_tm + 32, _sum4); _mm256_storeu_ps(output0_tm + 40, _sum5); _mm256_storeu_ps(output0_tm + 48, _sum6); _mm256_storeu_ps(output0_tm + 56, _sum7); output0_tm += 64; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 
4); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, 
_r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 40); _r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_fmadd_ps(_k01, _r03, _sum3); k01 += 64; r0 += 32; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); output0_tm += 32; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r0 = _mm256_broadcast_ss(r0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 8); _r0 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = 
_mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 16); _r0 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 24); _r0 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 32); _r0 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 40); _r0 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 48); _r0 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 56); _r0 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _sum0 = _mm256_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_fmadd_ps(_k01, _r01, _sum1); k01 += 64; r0 += 16; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); output0_tm += 16; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r0 = _mm256_broadcast_ss(r0); __m256 _mul0 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 8); _r0 = _mm256_broadcast_ss(r0 + 1); __m256 _mul1 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 16); _r0 = _mm256_broadcast_ss(r0 + 2); __m256 _mul2 = _mm256_mul_ps(_k01, _r0); __m256 _add01 = _mm256_add_ps(_mul0, _mul1); _k01 = _mm256_loadu_ps(k01 + 24); _r0 = 
_mm256_broadcast_ss(r0 + 3); __m256 _mul3 = _mm256_mul_ps(_k01, _r0); __m256 _add23 = _mm256_add_ps(_mul2, _mul3); __m256 _add0123 = _mm256_add_ps(_add01, _add23); _sum0 = _mm256_add_ps(_sum0, _add0123); _k01 = _mm256_loadu_ps(k01 + 32); _r0 = _mm256_broadcast_ss(r0 + 4); __m256 _mul4 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 40); _r0 = _mm256_broadcast_ss(r0 + 5); __m256 _mul5 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 48); _r0 = _mm256_broadcast_ss(r0 + 6); __m256 _mul6 = _mm256_mul_ps(_k01, _r0); __m256 _add45 = _mm256_add_ps(_mul4, _mul5); _k01 = _mm256_loadu_ps(k01 + 56); _r0 = _mm256_broadcast_ss(r0 + 7); __m256 _mul7 = _mm256_mul_ps(_k01, _r0); __m256 _add67 = _mm256_add_ps(_mul6, _mul7); __m256 _add4567 = _mm256_add_ps(_add45, _add67); _sum0 = _mm256_add_ps(_sum0, _add4567); k01 += 64; r0 += 8; } _mm256_storeu_ps(output0_tm, _sum0); output0_tm += 8; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p 
< outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); float tmp[6][8][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 8; const float* output0_tm_1 = output0_tm_0 + tiles * 8; const float* output0_tm_2 = output0_tm_0 + tiles * 16; const float* output0_tm_3 = output0_tm_0 + tiles * 24; const float* output0_tm_4 = output0_tm_0 + tiles * 32; const float* output0_tm_5 = output0_tm_0 + tiles * 40; const float* output0_tm_6 = output0_tm_0 + tiles * 48; const float* output0_tm_7 = output0_tm_0 + tiles * 56; float* output0 = out0.row(i * 6) + (j * 6) * 8; // TODO neon optimize for (int m = 0; m < 8; m++) { __m256 _out0tm0 = _mm256_loadu_ps(output0_tm_0); __m256 _out0tm1 = _mm256_loadu_ps(output0_tm_1); __m256 _out0tm2 = _mm256_loadu_ps(output0_tm_2); __m256 _out0tm3 = _mm256_loadu_ps(output0_tm_3); __m256 _out0tm4 = _mm256_loadu_ps(output0_tm_4); __m256 _out0tm5 = _mm256_loadu_ps(output0_tm_5); __m256 _out0tm6 = _mm256_loadu_ps(output0_tm_6); __m256 _out0tm7 = _mm256_loadu_ps(output0_tm_7); __m256 _tmp024a = _mm256_add_ps(_out0tm1, _out0tm2); __m256 _tmp135a = _mm256_sub_ps(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; __m256 _tmp024b = _mm256_add_ps(_out0tm3, _out0tm4); __m256 _tmp135b = _mm256_sub_ps(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; __m256 _tmp024c = _mm256_add_ps(_out0tm5, _out0tm6); __m256 _tmp135c = _mm256_sub_ps(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; __m256 _tmp0m = 
_mm256_add_ps(_mm256_add_ps(_out0tm0, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f)); __m256 _tmp2m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); __m256 _tmp4m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); _mm256_storeu_ps(tmp[0][m], _tmp0m); _mm256_storeu_ps(tmp[2][m], _tmp2m); _mm256_storeu_ps(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; __m256 _tmp1m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); __m256 _tmp3m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); __m256 _tmp5m = _mm256_add_ps(_mm256_add_ps(_out0tm7, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f)); _mm256_storeu_ps(tmp[1][m], _tmp1m); _mm256_storeu_ps(tmp[3][m], _tmp3m); _mm256_storeu_ps(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 64; output0_tm_1 += tiles * 64; output0_tm_2 += tiles * 64; output0_tm_3 += tiles * 64; output0_tm_4 += tiles * 64; output0_tm_5 += tiles * 64; output0_tm_6 += tiles * 64; output0_tm_7 += tiles * 64; } for (int m = 0; m < 6; m++) { __m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]); __m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]); __m256 _tmp02 = _mm256_loadu_ps(tmp[m][2]); __m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]); __m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]); __m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]); __m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]); __m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]); __m256 _tmp024a = _mm256_add_ps(_tmp01, _tmp02); __m256 _tmp135a = _mm256_sub_ps(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; __m256 _tmp024b = _mm256_add_ps(_tmp03, _tmp04); __m256 
_tmp135b = _mm256_sub_ps(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; __m256 _tmp024c = _mm256_add_ps(_tmp05, _tmp06); __m256 _tmp135c = _mm256_sub_ps(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; __m256 _out00 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp00, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f))); __m256 _out02 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); __m256 _out04 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); _mm256_storeu_ps(output0, _out00); _mm256_storeu_ps(output0 + 16, _out02); _mm256_storeu_ps(output0 + 32, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; __m256 _out01 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); __m256 _out03 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); __m256 _out05 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp07, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f))); _mm256_storeu_ps(output0 + 8, _out01); _mm256_storeu_ps(output0 + 24, _out03); _mm256_storeu_ps(output0 + 40, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 8; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
GB_unaryop__ainv_uint32_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint32_fp32
// op(A') function:  GB_tran__ainv_uint32_fp32

// C type:   uint32_t
// A type:   float
// cast:     uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop:  cij = -aij

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (AINV: additive inverse)
#define GB_OP(z, x)   \
    z = -x ;

// casting: float -> uint32_t (saturating/clamping cast for unsigned targets)
#define GB_CASTING(z, x)   \
    uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;

// cij = op (cast (aij))
// NOTE: the cast happens BEFORE the negation, so -x is evaluated in uint32_t
// arithmetic (i.e. modulo 2^32), not in float.  This ordering is intentional
// and fixed by the code generator.
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;   \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise kernel: Cx [p] = -(uint32_t) Ax [p] for all anz entries,
// parallelized statically over nthreads OpenMP threads.
// Returns GrB_NO_VALUE if this specialization is compiled out (GB_DISABLE).
GrB_Info GB_unop__ainv_uint32_fp32
(
    uint32_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose+apply.  The loop body lives in GB_unaryop_transpose.c, which
// is textually included and specialized via the macros defined above.
GrB_Info GB_tran__ainv_uint32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
HTree.h
//
// Created by Bangtian Liu on 6/28/19.
//

#ifndef PROJECT_HTREE_H
#define PROJECT_HTREE_H

#include "Util.h"
#include "../sympiler/HTree.h"
#include "HTree.h"
//#include "hcds.h"
#include <map>
#include <unordered_set>

// Flat, pointer-based runtime representation of a hierarchical cluster tree,
// populated from binary files produced by the sympiler step (see readHtree).
// All int* members are malloc'd by readHtree and never freed here —
// NOTE(review): ownership/lifetime is implicit; there is no matching free.
struct HTree{
	int *sidlen;
	int *sidoffset;
	int *sids;
	int depth;          // number of levels in levelset
	int *levelset;      // level pointers into idx (CSR-style, depth+1 entries)
	int *idx;           // node ids in level order
	int *clevelset;     // coarsened level pointers (only set when coarsing)
	int *wpart;         // work partition offsets (only set when coarsing)
	int *cidx;
	int cdepth;         // number of coarsened levels
	int *nblockset;
	int nrow;
	int *nblocks;
	int nb;
	int ncount;
	int *nxval;
	int *nyval;
	int *fblockset;
	int frow;
	int *fblocks;
	int fb;
	int fcount;
	int *fxval;
	int *fyval;
	int *Dim;           // per-leaf dimensions (nleaf entries)
	int *lm;            // leaf map: node id -> leaf index, or -1
	int nleaf;          // number of leaves
	int numnodes;       // 2*nleaf-1 (full binary tree)
	int *lids;
	int *lidsoffset;    // per-node offset into lids (numnodes entries)
	int *lidslen;       // per-node length in lids (numnodes entries)
	int *tlchildren;    // left-child id per node
	int *trchildren;    // right-child id per node
	int *leaf;          // leaf index -> node id (inverse of lm)
	double *X;
	Internal::Ktype ktype;
	double h;
	int dim;
	double cdstime=0;
	std::vector<std::pair<double , int>> NN;  // (distance, neighbor id) pairs
	std::vector<std::unordered_set<int>> pnids;
	std::vector<std::map<int, double>> snids;
	std::vector<std::multimap<double, int>> ordered_snids;
};

// Loads the tree structure from the ../sympiler/*.bin files into `tree`.
//   coarsing: also load the coarsened level set (clevelset/cidx/wpart).
//   cbs:      also load the block-set arrays (nblockset/nblocks/fblockset/fblocks).
// NOTE(review): malloc results are unchecked, and the local NN/dist buffers
// are leaked at the end of this function.
// NOTE(review): unqualified make_pair/malloc/memset rely on declarations
// pulled in via "Util.h" (e.g. a `using namespace std`) — confirm.
void readHtree(HTree &tree, bool coarsing, bool cbs)
{
	std::string nn("../sympiler/NN.bin");
	int len = preprocesoffset(nn);  // presumably returns element count of the file — TODO confirm
	int *NN = (int *)malloc(sizeof(int)*len);
	double *dist = (double *)malloc(sizeof(double)*len);
	bin2read(nn, NN, len);
	std::string distNN("../sympiler/distNN.bin");
	bin2read(distNN, dist, len);
	tree.NN.resize(len);
#pragma omp parallel for
	for(int i=0; i<len; i++)
	{
		tree.NN[i] = make_pair(dist[i], NN[i]);
	}

	if(coarsing){
		// NOTE(review): this inner `int len` shadows the outer `len`.
		// Harmless here (the outer len is reassigned before its next use),
		// but easy to misread.
		std::string clevel("../sympiler/clevelset.bin");
		int len = preprocesoffset(clevel);
		tree.clevelset = (int *)malloc(sizeof(int)*len);
		bin2read(clevel, tree.clevelset,len);
		tree.cdepth=len-1;  // len entries delimit len-1 levels

		std::string cidx("../sympiler/cidx.bin");
		len = preprocesoffset(cidx);
		tree.cidx = (int *)malloc(sizeof(int)*len);
		bin2read(cidx, tree.cidx,len);

		std::string wpart("../sympiler/wpart.bin");
		len = preprocesoffset(wpart);
		tree.wpart = (int *)malloc(sizeof(int)*len);
		bin2read(wpart, tree.wpart, len);

		std::string level("../sympiler/levelset.bin");
		len = preprocesoffset(level);
		tree.levelset = (int *)malloc(sizeof(int)*len);
		bin2read(level, tree.levelset, len);
		tree.depth=len-1;

		std::string idx("../sympiler/idx.bin");
		len = preprocesoffset(idx);
		tree.idx = (int *)malloc(sizeof(int)*len);
		bin2read(idx, tree.idx, len);
	}
	else{
		// Without coarsening only the plain level set is needed.
		std::string level("../sympiler/levelset.bin");
		len = preprocesoffset(level);
		tree.levelset = (int *)malloc(sizeof(int)*len);
		bin2read(level, tree.levelset, len);
		tree.depth=len-1;

		std::string idx("../sympiler/idx.bin");
		len = preprocesoffset(idx);
		tree.idx = (int *)malloc(sizeof(int)*len);
		bin2read(idx, tree.idx, len);
	}

	if(cbs){
		// Coarse block structure: block sets plus the (x,y) value pairs.
		std::string nbset("../sympiler/nblockset.bin");
		len = preprocesoffset(nbset);
		tree.nrow=len;
		tree.nblockset = (int *)malloc(sizeof(int)*len);
		bin2read(nbset, tree.nblockset, len);

		std::string nblock("../sympiler/nblocks.bin");
		len = preprocesoffset(nblock);
		tree.nb=len;
		tree.nblocks = (int *)malloc(sizeof(int)*len);
		bin2read(nblock, tree.nblocks, len);

		std::string nxval("../sympiler/nxval.bin");
		len = preprocesoffset(nxval);
		tree.ncount=len;
		tree.nxval = (int *)malloc(sizeof(int)*len);
		bin2read(nxval, tree.nxval,len);

		std::string nyval("../sympiler/nyval.bin");
		len = preprocesoffset(nyval);
		tree.nyval = (int *)malloc(sizeof(int)*len);
		bin2read(nyval, tree.nyval,len);

		std::string fbset("../sympiler/fblockset.bin");
		len = preprocesoffset(fbset);
		tree.frow=len;
		tree.fblockset = (int *)malloc(sizeof(int)*len);
		bin2read(fbset, tree.fblockset, len);

		std::string fblock("../sympiler/fblocks.bin");
		len = preprocesoffset(fblock);
		tree.fb=len;
		tree.fblocks = (int *)malloc(sizeof(int)*len);
		bin2read(fblock, tree.fblocks, len);

		std::string fxval("../sympiler/fxval.bin");
		len = preprocesoffset(fxval);
		tree.fcount=len;
		tree.fxval = (int *)malloc(sizeof(int)*len);
		bin2read(fxval, tree.fxval,len);

		std::string fyval("../sympiler/fyval.bin");
		len = preprocesoffset(fyval);
		tree.fyval = (int *)malloc(sizeof(int)*len);
		bin2read(fyval, tree.fyval,len);
	}
	else {
		// Without coarse block structure only the value pair arrays are read.
		std::string nxval("../sympiler/nxval.bin");
		len = preprocesoffset(nxval);
		tree.ncount=len;
		tree.nxval = (int *)malloc(sizeof(int)*len);
		bin2read(nxval, tree.nxval,len);

		std::string nyval("../sympiler/nyval.bin");
		len = preprocesoffset(nyval);
		tree.nyval = (int *)malloc(sizeof(int)*len);
		bin2read(nyval, tree.nyval,len);

		std::string fxval("../sympiler/fxval.bin");
		len = preprocesoffset(fxval);
		tree.fcount=len;
		tree.fxval = (int *)malloc(sizeof(int)*len);
		bin2read(fxval, tree.fxval,len);

		std::string fyval("../sympiler/fyval.bin");
		len = preprocesoffset(fyval);
		tree.fyval = (int *)malloc(sizeof(int)*len);
		bin2read(fyval, tree.fyval,len);
	}

	// Per-leaf dimensions; leaf count derives the full binary tree size.
	std::string dim("../sympiler/dim.bin");
	len = preprocesoffset(dim);
	tree.nleaf = len;
	tree.Dim = (int *)malloc(sizeof(int)*len);
	bin2read(dim, tree.Dim, len);
	tree.numnodes = 2*tree.nleaf-1;
	tree.pnids.resize(tree.numnodes);
	tree.snids.resize(tree.numnodes);
	tree.ordered_snids.resize(tree.numnodes);

	// Leaf map and its inverse: lm[node] == leaf index (or -1 for internal
	// nodes); leaf[leaf index] == node id.
	std::string lm("../sympiler/leafmap.bin");
	len = preprocesoffset(lm);
	tree.lm = (int *)malloc(sizeof(int)*len);
	bin2read(lm, tree.lm, len);
	tree.leaf = (int *)malloc(sizeof(int)*tree.nleaf);
	memset(tree.leaf, 0, sizeof(int)*tree.nleaf);
	for(int i=0; i<len; i++)
	{
		if(tree.lm[i]!=-1){
			int idx = tree.lm[i];
			tree.leaf[idx] = i;
		}
	}

	// Per-node id lists (lids) addressed via lidsoffset/lidslen, and the
	// left/right child arrays.  lidsoffset/lidslen/trchildren reuse the
	// previously determined lengths rather than calling preprocesoffset.
	std::string lids("../sympiler/lids.bin");
	len = preprocesoffset(lids);
	tree.lids = (int *)malloc(sizeof(int)*len);
	bin2read(lids, tree.lids, len);

	std::string lidsoffset("../sympiler/lidsoffset.bin");
	tree.lidsoffset = (int *)malloc(sizeof(int)*tree.numnodes);
	bin2read(lidsoffset, tree.lidsoffset, tree.numnodes);

	std::string lidslen("../sympiler/lidslen.bin");
	tree.lidslen = (int *)malloc(sizeof(int)*tree.numnodes);
	bin2read(lidslen, tree.lidslen, tree.numnodes);

	std::string tlc("../sympiler/lchildren.bin");
	len = preprocesoffset(tlc);
	tree.tlchildren = (int *)malloc(sizeof(int)*len);
	bin2read(tlc, tree.tlchildren, len);

	std::string trc("../sympiler/rchildren.bin");
	tree.trchildren = (int *)malloc(sizeof(int)*len);  // same length as lchildren
	bin2read(trc, tree.trchildren, len);
}

// Expands the coarsened level/work-partition arrays of `tree` into the
// nested vector form ctree.postw[level][partition] -> list of node ids.
void coarloadtree(HTree &tree, clustertree &ctree)
{
	ctree.postw.resize(tree.cdepth);
	for(int i=0; i<tree.cdepth; i++)
	{
		int nwparts=tree.clevelset[i+1]-tree.clevelset[i];
		ctree.postw[i].resize(nwparts);
		for(int j=tree.clevelset[i]; j<tree.clevelset[i+1]; j++)
		{
			int loc=j-tree.clevelset[i];  // partition index within this level
			int nnodes = tree.wpart[j+1] - tree.wpart[j];
			ctree.postw[i][loc].reserve(nnodes);
			for(int k = tree.wpart[j]; k<tree.wpart[j+1]; k++)
			{
				int idx = tree.idx[k];
				ctree.postw[i][loc].push_back(idx);
			}
		}
	}
}

// Expands the flat level-set arrays of `tree` into the nested vector form
// ctree.levelsets[level] -> list of node ids.
void loadtree(HTree &tree, clustertree &ctree)
{
	ctree.levelsets.resize(tree.depth);
	for(int i=0; i<tree.depth; i++)
	{
		int nnodes = tree.levelset[i+1] - tree.levelset[i];
		ctree.levelsets[i].reserve(nnodes);
		for(int j = tree.levelset[i]; j<tree.levelset[i+1];j++)
		{
			int idx = tree.idx[j];
			ctree.levelsets[i].push_back(idx);
		}
	}
}

#endif //PROJECT_HTREE_H
raytracer.h
#pragma once

#include "type.h"
#include "screen.h"
#include "camera.h"
#include "scene.h"
#include "primitive.h"

namespace render {

// Per-pixel renderer.  render() is invoked once per pixel (coordinates come
// from thread_ctx) and writes the traced, tone-mapped color to the screen.
// Despite the RAYTRACING/PATHTRACING mode flag, trace() currently always
// dispatches to pathtracing() (the mode branch is commented out).
class raytracer : public gpu {
public:
	enum mode {
		RAYTRACING,
		PATHTRACING,
	};
public:
	CPU raytracer(const camera &c, enum mode m = RAYTRACING, vector3f bg = vector3f(0,0,0));
	CPU void setmode(enum mode m);
	CPU void setbackground(vector3f c);
	GPU void render(thread_ctx *ctx, const scene &sc, screen &scrn);
	//GPU bool render(const scene &sc, screen &scrn, int spp = 128);
private:
	GPU vector3f trace(thread_ctx *ctx, ray r, int depth);
	GPU vector3f raytracing(thread_ctx *ctx, const ray &r, const hit &h, int depth);
	GPU vector3f pathtracing(thread_ctx *ctx, const ray &r, const hit &h, int depth);
private:
	// material-specific shading helpers
	GPU vector3f light(const ray &r, const hit &h, int depth);
	GPU vector3f glass(thread_ctx *ctx, const ray &r, const hit &h, int depth);
	GPU vector3f diffuse(thread_ctx *ctx, const ray &r, const hit &h, int depth);
private:
	enum mode mode_;
	const camera &camera_;
	const scene *sc = nullptr;          // set by render() before tracing
	vector3f background = vector3f(0, 0, 0);
};

// Dielectric shading: recursively traces one reflected and one refracted ray
// and blends them with the Fresnel coefficient kr.  Ray origins are offset by
// +/-EPSILON along the normal (sign chosen per ray direction) to avoid
// self-intersection.
GPU vector3f raytracer::glass(thread_ctx *ctx, const ray &r, const hit &h, int depth)
{
	auto &dir = r.direction;
	auto &N = h.normal;
	auto m = materialX(h.obj);
	auto hit_point = r.move(h.distance);
	vector3f reflect_dir = optics::reflect(dir, N).normalized();
	vector3f refract_dir = optics::refract(dir, N, m->ior).normalized();
	float reflect_offset = reflect_dir.dot(N) >= 0 ? EPSILON : -EPSILON;
	float refract_offset = refract_dir.dot(N) >= 0 ? EPSILON : -EPSILON;
	ray reflect_ray(hit_point + reflect_offset * N, reflect_dir);
	ray refract_ray(hit_point + refract_offset * N, refract_dir);
	vector3f reflect_color, refract_color;
	reflect_color = trace(ctx, reflect_ray, depth + 1);
	refract_color = trace(ctx, refract_ray, depth + 1);
	float kr = optics::fresnel(dir, N, m->ior);
	return reflect_color * kr + refract_color * (1 - kr);
}

// Whitted-style diffuse shading.  The implementation is compiled out (#if 0);
// the method currently always returns black.
GPU vector3f raytracer::diffuse(thread_ctx *ctx, const ray &r, const hit &h, int depth)
{
#if 0
	auto m = h.obj->material();
	auto &N = h.normal;
	auto &dir = r.direction;
	auto hit_point = r.move(h.distance);
	vector3f ambient(0,0,0), specular(0,0,0);
	float sign = dir.dot(N) < 0 ? 1.f : -1.f;
	vector3f shadow_pos = hit_point + sign * N * EPSILON;
	for (auto li:sc->getlights()) {
		hit hs;
		float r = (li->position() - hit_point).norm();
		vector3f l = (li->position() - hit_point).normalized();
		vector3f v = -dir;
		vector3f I = li->material()->albedo(h.texcoord);
		vector3f h = (v+l).normalized();
		//compute light
		ray shadowray(shadow_pos, l);
		if (!sc->intersect(shadowray, hs) || hs.distance >= r || hs.obj == li)
			ambient += I * std::max(0.f, l.dot(N));
		specular += I * std::pow(std::max(0.f, h.dot(N)), 25.f);
	}
	vector3f diffuse = m->albedo(h.texcoord);
	return ambient.cwiseProduct(diffuse) * m->Kd() + specular * m->Ks();
#endif
	return vector3f(0,0,0);
}

// Emissive material: only camera rays (depth == 0) see the light's albedo;
// secondary hits return black (emission is instead gathered by the explicit
// light sampling in pathtracing()).
GPU vector3f raytracer::light(const ray &r, const hit &h, int depth)
{
	return depth == 0 ? materialX(h.obj)->albedo(h.texcoord): vector3f(0,0,0);
}

// Iterative path tracer (max 10 bounces).  Per bounce:
//   1) direct lighting via explicit light sampling (next-event estimation),
//   2) indirect lighting via BRDF sampling with Russian roulette
//      (continuation probability 0.8, compensated in the weight `frac`).
GPU vector3f raytracer::pathtracing(thread_ctx *ctx, const ray &rr, const hit &hh, int depth)
{
	vector3f L(0,0,0);      // accumulated radiance
	vector3f frac(1,1,1);   // path throughput, attenuated each bounce
	ray r = rr;
	hit h = hh;
	for (int i = 0; i < 10; i++) {
		auto &N = h.normal;
		auto wo = -r.direction;
		auto m = materialX(h.obj);
		if (m->type == material::LIGHT)
			return light(r, h, depth);

		// 1) direct lighting: sample a point on a light and test visibility
		{
			vector3f L_dir(0,0,0);
			hit hit_l, hit_middle;
			float light_pdf = sc->samplelight(ctx, hit_l);
			auto dir = hit_l.point - h.point;
			auto wi = dir.normalized();
			auto eye = h.point + wi * EPSILON;
			// NOTE(review): this inner `ray r` shadows the outer path ray.
			ray r(eye, wi);
			sc->intersect(r, hit_middle);
			// if (hit_middle.obj == hit_l.obj) { //has no middle
			if (hit_middle.distance - dir.norm() > -0.001f) { //has no middle
				auto L_i = materialX(hit_l.obj)->albedo(hit_l.texcoord);
				auto f_r = m->brdf(h, wi, wo);
				auto cos = fmaxf(N.dot(wi), 0.f);
				auto cos_prime = fmaxf(hit_l.normal.dot(-wi), 0.f);
				auto r_square = dir.squaredNorm();
				light_pdf += EPSILON;  // guard against division by zero
				/*
				std::cout << "f_r:" << f_r << std::endl;
				std::cout << "L_i:" << L_i << std::endl;
				std::cout << "cos_prim:" << cos_prime << std::endl;
				std::cout << "cos:" << cos << std::endl;
				std::cout << " demo:" << (r_square * light_pdf) << std::endl;
				*/
				L_dir = L_i.cwiseProduct(f_r) * cos_prime * cos / (r_square * light_pdf);
				L += L_dir.cwiseProduct(frac);
				//std::cout << " Ldir:" << L_dir << std::endl;
			}
		}

		// 2) indirect lighting: Russian roulette + BRDF importance sampling
		if (1) {
			float ksi = ctx->rand();
			if (ksi < 0.8) {
				vector3f L_indir(0,0,0);  // NOTE(review): unused
				auto wi = m->sample(ctx, wo, N);
				float pdf_ = m->pdf(wi, wo, N) + EPSILON;
				auto f_r = m->brdf(h, wi, wo);
				auto cos = fmaxf(N.dot(wi), 0.f);
				auto f = f_r * cos / (pdf_ * 0.8f);  // 0.8 = RR survival prob.
				frac = frac.cwiseProduct(f);
				ray rx(h.point + EPSILON * wi, wi);
				r = rx;
				depth += 1;
				if (!sc->intersect(r, h)) {
					// escaped the scene: pick up the background color
					L += background.cwiseProduct(frac);
					break;
				}
			} else {
				break;  // path terminated by Russian roulette
			}
		}
	}
	return L;
}

// Whitted-style dispatch by material type, capped at recursion depth 6.
GPU vector3f raytracer::raytracing(thread_ctx *ctx, const ray &r, const hit &h, int depth)
{
	vector3f hitcolor(0,0,0);
	if (depth > 6)
		return hitcolor;
	switch (materialX(h.obj)->type) {
	case material::LIGHT:
		return light(r, h, depth);
	case material::GLASS:
		return glass(ctx, r, h, depth);
	case material::DIFFUSE:
		return diffuse(ctx, r, h, depth);
	default:
		return vector3f(0,0,0);
	}
}

// Traces one ray into the scene; misses return the background color.
// The mode switch is currently disabled: always path traces.
GPU vector3f raytracer::trace(thread_ctx *ctx, ray r, int depth)
{
	hit h;
	if (!sc->intersect(r, h))
		return background;
//	if (mode_ == RAYTRACING)
//		return raytracing(ctx, r, h, depth);
//	else
	vector3f c = pathtracing(ctx, r, h, depth);
	return c;
}

// Renders the single pixel (ctx->x, ctx->y): builds the camera ray through
// the pixel center, traces it, tone-maps, and accumulates into the screen.
GPU void raytracer::render(thread_ctx *ctx, const scene &sc, screen &scrn)
{
	int i = ctx->x;
	int j = ctx->y;
	this->sc = &sc;
	int width = scrn.getsize().x();
	int height = scrn.getsize().y();  // NOTE(review): unused
	float aspect = scrn.aspect();
	float x = (i + 0.5f) / (float)width;
	float y = (j + 0.5f) / (float)height;
	ray r = camera_.lookat(aspect, x, y);
	auto c = tone_mapping(trace(ctx, r, 0));
	scrn.add(i, j, c);
}

#if 0
// Disabled CPU whole-frame renderer (OpenMP over all pixels).
GPU bool raytracer::render(const scene &sc, screen &scrn, int spp)
{
	this->sc = &sc;
	auto &size = scrn.getsize();
	float aspect = scrn.aspect();
	int width = size.x();
	int height = size.y();
	int total = (uint64_t)width * height;
#pragma omp parallel for
	for (uint64_t n = 0; n < total; n++) {
		uint64_t w = n;
		uint32_t i = int(w % width);
		uint32_t j = int(w / width);
		float x = (i + 0.5f) / (float)width;
		float y = (j + 0.5f) / (float)height;
		ray r = camera_.lookat(aspect, x, y);
		auto c = tone_mapping(trace(ctx, r, 0));
		scrn.add(i, j, c);
	}
	return true;
}
#endif

CPU void raytracer::setmode(enum mode m)
{
	mode_ = m;
}

CPU void raytracer::setbackground(vector3f c)
{
	background = c;
}

CPU raytracer::raytracer(const camera &c, raytracer::mode m, vector3f bg):
	mode_(m),camera_(c), background(bg)
{
}

}
shmem_ctx.c
/* * Copyright (c) 2017 Intel Corporation. All rights reserved. * This software is available to you under the BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * This test is derived from an example provided in the OpenSHMEM 1.4 * specification. Additional copyrights may apply. 
* */ #include <stdio.h> #include <shmem.h> long pwrk[SHMEM_REDUCE_MIN_WRKDATA_SIZE]; long psync[SHMEM_REDUCE_SYNC_SIZE]; long task_cntr = 0; /* Next task counter */ long tasks_done = 0; /* Tasks done by this PE */ long total_done = 0; /* Total tasks done by all PEs */ int main(void) { int tl, i, ret; long ntasks = 1024; /* Total tasks per PE */ for (i = 0; i < SHMEM_REDUCE_SYNC_SIZE; i++) psync[i] = SHMEM_SYNC_VALUE; ret = shmem_init_thread(SHMEM_THREAD_MULTIPLE, &tl); if (tl != SHMEM_THREAD_MULTIPLE || ret != 0) { printf("Init failed (requested thread level %d, got %d, ret %d)\n", SHMEM_THREAD_MULTIPLE, tl, ret); if (ret == 0) { shmem_global_exit(1); } else { return ret; } } int me = shmem_my_pe(); int npes = shmem_n_pes(); #pragma omp parallel reduction (+:tasks_done) { shmem_ctx_t ctx; int task_pe = me, pes_done = 0; int ret = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx); if (ret != 0) { printf("%d: Error creating context (%d)\n", me, ret); shmem_global_exit(2); } /* Process tasks on all PEs, starting with the local PE. After * all tasks on a PE are completed, help the next PE. */ while (pes_done < npes) { long task = shmem_ctx_long_atomic_fetch_inc(ctx, &task_cntr, task_pe); while (task < ntasks) { /* Perform task (task_pe, task) */ tasks_done++; task = shmem_ctx_long_atomic_fetch_inc(ctx, &task_cntr, task_pe); } pes_done++; task_pe = (task_pe + 1) % npes; } shmem_ctx_destroy(ctx); } shmem_long_sum_to_all(&total_done, &tasks_done, 1, 0, 0, npes, pwrk, psync); int result = (total_done != ntasks * npes); if (me == 0 && result) printf("Error: total_done is %ld, expected %ld\n", total_done, ntasks * npes); shmem_finalize(); return result; }
pragmaScope2.c
// This example shows the multiple pragmas in the same scope with a following statement without brackets.

/*
 * Zeroes an n x m matrix, then shifts it up-and-left by one cell
 * (a[i][j] = a[i+1][j+1]) with the inner loop parallelized. The
 * back-to-back "#pragma omp parallel" / "#pragma omp for" before an
 * unbraced for-statement is the point of the example and is kept as-is.
 *
 * Fix: the initialization loop bounded the column index by n instead of
 * m. The array is a[n][m], so with n > m that loop would write out of
 * bounds (it only worked here because n == m == 10).
 */
int main(){
    int i, j;
    int n = 10, m = 10;   /* n rows, m columns */
    int a[n][m];          /* C99 variable-length array */

    /* Zero the whole matrix; columns run to m, not n. */
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
            a[i][j] = 0;

    /* Shift: each row (except the last) takes values from the next
     * row/column. The pragmas apply to the unbraced inner statement. */
    for (i = 0; i < n - 1; i++)
#pragma omp parallel
#pragma omp for
        for (j = 0; j < m - 1; j++)
            a[i][j] = a[i + 1][j + 1];

    return 0;
}
bucle-forModificado.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Runs a parallel-for over n iterations (n taken from argv[1]) and prints
 * which OpenMP thread executes each iteration.
 *
 * Fix: exit(-1) is not a portable exit status (the value passed to exit()
 * is truncated to the low 8 bits on POSIX); use EXIT_FAILURE instead.
 *
 * Returns 0 on success; exits with EXIT_FAILURE when the iteration count
 * argument is missing.
 */
int main(int argc, char **argv)
{
    int i, n = 9; /* default is overwritten below; kept for compatibility */

    if (argc < 2) {
        fprintf(stderr,"\n[ERROR] - Falta no iteraciones \n");
        exit(EXIT_FAILURE);
    }

    /* NOTE(review): atoi gives 0 for non-numeric input (loop simply runs
     * zero times); strtol would allow real validation if ever needed. */
    n = atoi(argv[1]);

    /* Iterations are divided among the threads of the parallel region. */
    #pragma omp parallel for
    for (i = 0; i < n; i++)
        printf("thread %d ejecuta la iteración %d del bucle\n",
               omp_get_thread_num(), i);

    return 0;
}
kmp_set_dispatch_buf.c
// RUN: %libomp-compile && %libomp-run 7 // RUN: %libomp-run 0 && %libomp-run -1 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run 7 // RUN: %libomp-run 1 && %libomp-run 2 && %libomp-run 5 // UNSUPPORTED: clang-11 #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 7 #define MY_MAX 200 #define MY_MIN -200 #ifndef MY_SCHEDULE # define MY_SCHEDULE dynamic #endif int num_disp_buffers, num_loops; int a, b, a_known_value, b_known_value; int test_kmp_set_disp_num_buffers() { int success = 1; a = 0; b = 0; // run many small dynamic loops to stress the dispatch buffer system #pragma omp parallel { int i,j; for (j = 0; j < num_loops; j++) { #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } } // detect failure if (a != a_known_value || b != b_known_value) { success = 0; printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); } return success; } int main(int argc, char** argv) { int i,j; int num_failed=0; if (argc != 2) { fprintf(stderr, "usage: %s num_disp_buffers\n", argv[0]); exit(1); } // set the number of dispatch buffers num_disp_buffers = atoi(argv[1]); kmp_set_disp_num_buffers(num_disp_buffers); // figure out the known values to compare with calculated result a_known_value = 0; b_known_value = 0; // if specified to use bad num_disp_buffers set num_loops // to something reasonable if (num_disp_buffers <= 0) num_loops = 10; else num_loops = num_disp_buffers*10; for (j = 0; j < num_loops; j++) { for (i = MY_MIN; i < MY_MAX; i+=INCR) a_known_value++; for (i = MY_MAX; i >= MY_MIN; i-=INCR) b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_set_disp_num_buffers()) { num_failed++; } } return num_failed; }
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class VersionTuple; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. 
IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. mutable IdentifierInfo *Ident_instancetype; /// \brief Identifier for "introduced". IdentifierInfo *Ident_introduced; /// \brief Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// \brief Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// \brief Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// \brief Identifier for "message". IdentifierInfo *Ident_message; /// \brief Identifier for "strict". IdentifierInfo *Ident_strict; /// \brief Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++0x contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". 
This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// \brief When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// \brief RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } unsigned getDepth() const { return Depth; } }; /// Factory object for creating AttributeList objects. AttributeFactory AttrFactory; /// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// \brief Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. 
SourceLocation ExprStatementTokLoc; public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. 
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren; } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square; } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace; } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. 
bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// \brief Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// \brief Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed); PP.Lex(Tok); PP.EnterToken(Next); } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) --ParenCount; // Don't let unbalanced )'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. 
/// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) --BracketCount; // Don't let unbalanced ]'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) --BraceCount; // Don't let unbalanced }'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// \brief Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. 
void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// \brief Determine if we're at the end of the file or at a transition /// between modules. bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// \brief Initialize all pragma handlers. void initializePragmaHandlers(); /// \brief Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// \brief Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// \brief Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// \brief Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// \brief Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// \brief Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// \brief Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// \brief Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// \brief Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// \brief Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// \brief Handle the annotation token produced for /// #pragma redefine_extname... 
void HandlePragmaRedefineExtname(); /// \brief Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// \brief Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// \brief Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// \brief Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// \brief Read an already-translated primary expression out of an annotation /// token. 
static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// \brief Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. 
/// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC1); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// \brief Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// \brief The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// \brief The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// \brief Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. 
bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// \brief RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. 
public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// \brief Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// \brief Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. 
// Single-token form; forwards to the ArrayRef overload below.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
// Two-token form: stop at whichever of T1/T2 is seen first.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
// Three-token form: stop at whichever of T1/T2/T3 is seen first.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations. struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; /// \brief Whether this member function had an associated template /// scope. When true, D is a template declaration. /// otherwise, it is a member function declaration. 
bool TemplateScope;

explicit LexedMethod(Parser* P, Decl *MD)
    : Self(P), D(MD), TemplateScope(false) {}

void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

/// \brief The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// \brief Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// \brief Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// \brief Whether this class is an __interface.
bool IsInterface : 1;

/// \brief The class or class template whose definition we are parsing.
Decl *TagOrTemplate;

/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

// Returns the innermost class currently being parsed; must not be called
// with an empty class stack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// \brief Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop implicitly if Pop() was not called explicitly.
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization?
ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// \brief The kind of template we are parsing. enum { /// \brief We are not parsing a template at all. NonTemplate = 0, /// \brief We are parsing a template declaration. Template, /// \brief We are parsing an explicit specialization. ExplicitSpecialization, /// \brief We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// \brief The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// \brief The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// \brief The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// \brief Whether the last template parameter list was empty. 
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers& VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool 
ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. 
enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. 
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, void *Info, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, std::function<void()> Completer = nullptr); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. 
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false); //===--------------------------------------------------------------------===// // C++0x 5.1.2: Lambda expressions // [...] 
() -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro, bool *SkippedInits = nullptr); bool TryParseLambdaIntroducer(LambdaIntroducer &Intro); ExprResult ParseLambdaExpressionAfterIntroducer( LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while condition expression. Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... 
// Dispatch on the current token: a '{' starts a braced initializer list,
// anything else is an assignment-expression (C99 6.7.8).
ExprResult ParseInitializer() {
  if (Tok.isNot(tok::l_brace))
    return ParseAssignmentExpression();
  return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();

//===--------------------------------------------------------------------===//
// clang Expressions

ExprResult ParseBlockLiteralExpression();  // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions

ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, bool AllowOpenMPStandalone = false); enum AllowedConstructsKind { /// \brief Allow any declarations, statements, OpenMP directives. ACK_Any, /// \brief Allow only statements and non-standalone OpenMP directives. ACK_StatementsOpenMPNonStandalone, /// \brief Allow statements and all executable OpenMP directives ACK_StatementsOpenMPAnyExecutable }; StmtResult ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, AllowedConstructsKind Allowed, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs); StmtResult ParseCaseStatement(bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, AllowedConstructsKind Allowed, SourceLocation 
*TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// \brief Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// \brief Parse the block; this code is always used. IEB_Parse, /// \brief Skip the block entirely; this code is never used. IEB_Skip, /// \brief Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// \brief Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// \brief The location of the initial keyword. SourceLocation KeywordLoc; /// \brief Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// \brief Nested-name-specifier preceding the name. CXXScopeSpec SS; /// \brief The name we're looking for. UnqualifiedId Name; /// \brief The behavior of this __if_exists or __if_not_exists block /// should. IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, AccessSpecifier& CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult 
ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DSC_normal: case DSC_template_param: case DSC_class: case DSC_top_level: case DSC_objc_method_result: case DSC_condition: return false; case DSC_template_type_arg: case DSC_type_specifier: case DSC_trailing: case DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  // Exhaustive over DeclSpecContext: -Wswitch flags any newly added
  // enumerator that is not classified here.
  switch (DSC) {
  case DSC_normal:
  case DSC_template_param:
  case DSC_class:
  case DSC_top_level:
  case DSC_condition:
  case DSC_type_specifier:
    return true;

  case DSC_objc_method_result:
  case DSC_template_type_arg:
  case DSC_trailing:
  case DSC_alias_declaration:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  /// True when a ':' was seen, i.e. this really is a for-range-declaration
  /// rather than an ordinary declaration.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};

// Entry points for parsing declarations (see the "C99 6.7: Declarations."
// section header above).
DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
                                      SourceLocation &DeclEnd,
                                      ParsedAttributesWithRange &attrs,
                                      bool RequireSemi,
                                      ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(unsigned Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context); void ParseDeclarationSpecifiers(DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, Declarator::TheContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// \brief Return true if we know that we are definitely looking at a /// decl-specifier, and isn't part of an expression such as a function-style /// cast. Return false if it's no a decl-specifier, or we're not sure. 
bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ // 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// \brief Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// \brief Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// \brief Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// \brief Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. 
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C there is no type-id/expression ambiguity inside parens.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
// Convenience overload for callers that do not care about ambiguity.
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// \brief Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;

enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  Error          ///< Can't be any of the above!
};

/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
// Convenience overload that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult { True, False, Ambiguous, Error };

/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *HasMissingTypename = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// \brief Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, Declarator::TheContext Context = Declarator::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); // Check for the start of a C++11 attribute-specifier-seq in a context where // an attribute is not allowed. 
bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!getLangOpts().CPlusPlus11 || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!getLangOpts().CPlusPlus11) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); void ProhibitAttributes(ParsedAttributesWithRange &attrs) { if (!attrs.Range.isValid()) return; DiagnoseProhibitedAttributes(attrs); attrs.clear(); } void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs); // Forbid C++11 attributes that appear on certain syntactic // locations which standard permits but we don't supported yet, // for example, attributes appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// \brief Skip C++11 attributes and return the end location of the last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// \brief Diagnose and skip C++11 attributes that appear in syntactic /// locations where attributes are not allowed. void DiagnoseAndSkipCXX11Attributes(); /// \brief Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. 
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  AttributeList::Syntax Syntax);

// If the next token is '__attribute__', parse the GNU attribute list and
// attach it to declarator D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    D.takeAttributes(attrs, endLoc);
  }
}
// As above, but accumulates into an attribute list instead of a declarator.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute))
    ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
                        SourceLocation *endLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
                           SourceLocation *EndLoc, IdentifierInfo *ScopeName,
                           SourceLocation ScopeLoc,
                           AttributeList::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName,
                                 SourceLocation AttrNameLoc,
                                 ParsedAttributes &Attrs,
                                 SourceLocation *EndLoc,
                                 IdentifierInfo *ScopeName,
                                 SourceLocation ScopeLoc,
                                 AttributeList::Syntax Syntax);

// Parse C++11 attributes (when enabled and present) and attach them to D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrs(AttrFactory);
    SourceLocation endLoc;
    ParseCXX11Attributes(attrs, &endLoc);
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (getLangOpts().CPlusPlus11 &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}

void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                  SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                          SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++-style attribute argument list. Returns true if this
/// results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);

// Parse Microsoft '[...]' attributes when the extension is enabled.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
                              SourceLocation *endLoc = nullptr);
// Parse '__declspec(...)' specifiers when the keyword is available.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  const auto &LO = getLangOpts();
  if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                             SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                SourceLocation AttrNameLoc,
                                ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);

/// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (getLangOpts().OpenCL)
    return ParseOpenCLUnrollHintAttribute(Attrs);
  // Not OpenCL: nothing to parse, report success.
  return true;
}
/// \brief Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);

void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                SourceLocation AvailabilityLoc,
                                ParsedAttributes &attrs,
                                SourceLocation *endLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                        SourceLocation Loc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        AttributeList::Syntax Syntax);

void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                     SourceLocation ObjCBridgeRelatedLoc,
                                     ParsedAttributes &attrs,
                                     SourceLocation *endLoc,
                                     IdentifierInfo *ScopeName,
                                     SourceLocation ScopeLoc,
                                     AttributeList::Syntax Syntax);

void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                      SourceLocation AttrNameLoc,
                                      ParsedAttributes &Attrs,
                                      SourceLocation *EndLoc,
                                      IdentifierInfo *ScopeName,
                                      SourceLocation ScopeLoc,
                                      AttributeList::Syntax Syntax);

void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs,
                               SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               AttributeList::Syntax Syntax);

void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);

ExprResult ParseAlignArgument(SourceLocation Start,
                              SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                             SourceLocation *endLoc = nullptr);

VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
// Convenience form that inspects the current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                        SourceLocation FriendLoc);

bool isCXX11FinalKeyword() const;

/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope; // set when ActOnCXXEnterDeclaratorScope returned false
  bool CreatedScope; // set once a parser scope has been pushed
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
      : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  // Undo, in reverse order, whatever EnterDeclaratorScope did.
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};

/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
                             DirectDeclParseFunction DirectDeclParser);

// Bit-flags describing which attribute syntaxes ParseTypeQualifierListOpt
// accepts (and which it diagnoses).
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1,
  AR_CXX11AttributesParsed = 1 << 2,
  AR_DeclspecAttributesParsed = 1 << 3,
  AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed =
      AR_GNUAttributesParsed | AR_DeclspecAttributesParsed
};

void ParseTypeQualifierListOpt(
    DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
    bool AtomicAllowed = true, bool IdentifierRequired = false,
    Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs,
                             BalancedDelimiterTracker &Tracker,
                             bool IsAmbiguous, bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                       SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
    Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
    Declarator &D, ParsedAttributes &attrs,
    SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
    SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);

//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]

/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
  /// This is not an attribute specifier.
  CAK_NotAttributeSpecifier,
  /// This should be treated as an attribute-specifier.
  CAK_AttributeSpecifier,
  /// The next tokens are '[[', but this is not an attribute-specifier. This
  /// is ill-formed by C++11 [dcl.attr.grammar]p6.
  CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
                          bool OuterMightBeMessageSend = false);

void DiagnoseUnexpectedNamespace(NamedDecl *Context);

DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
                              SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
                         std::vector<IdentifierInfo*>& Ident,
                         std::vector<SourceLocation>& NamespaceLoc,
                         unsigned int index, SourceLocation& InlineLoc,
                         ParsedAttributes& attrs,
                         BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
    unsigned Context, const ParsedTemplateInfo &TemplateInfo,
    SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(unsigned Context, SourceLocation UsingLoc,
                          SourceLocation &DeclEnd, ParsedAttributes &attrs);

struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  SourceLocation TemplateKWLoc;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;

  // Reset every field so the declarator can be reused for the next entity.
  void clear() {
    TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};

bool ParseUsingDeclarator(unsigned Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(unsigned Context,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     SourceLocation UsingLoc,
                                     SourceLocation &DeclEnd,
                                     AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
    const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
    UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
    ParsedAttributes &Attrs, Decl **OwnedType = nullptr);

Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
                          SourceLocation AliasLoc, IdentifierInfo *Alias,
                          SourceLocation &DeclEnd);

//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, AttributeList *Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, 
bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// \brief Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// \brief Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// \brief Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// \brief Parses declarative or executable directive. /// /// \param Allowed ACK_Any, if any directives are allowed, /// ACK_StatementsOpenMPAnyExecutable - if any executable directives are /// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone /// executable directives are allowed. /// StmtResult ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed); /// \brief Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. 
/// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// \brief Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind); /// \brief Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind); /// \brief Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind); /// \brief Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind); /// \brief Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; CXXScopeSpec ReductionIdScopeSpec; DeclarationNameInfo ReductionId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation& TemplateKWLoc, UnqualifiedId &Result); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none, AttributeList *AccessAttrs = nullptr); Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS, AttributeList *AccessAttrs); Decl *ParseSingleDeclarationAfterTemplate( unsigned Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, AccessSpecifier AS=AS_none, AttributeList *AccessAttrs = nullptr); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); Decl *ParseTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseTypeParameter(unsigned Depth, unsigned Position); Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, 
bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool IsTemplateArgumentList(unsigned Skip = 0); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(unsigned Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(); DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool 
IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
constitute.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%          CCCC   OOO   N   N  SSSSS  TTTTT  IIIII  TTTTT  U   U  TTTTT EEEEE %
%         C      O   O  NN  N  SS       T      I      T    U   U    T   E     %
%         C      O   O  N N N   SSS     T      I      T    U   U    T   EEE   %
%         C      O   O  N  NN     SS    T      I      T    U   U    T   E     %
%          CCCC   OOO   N   N  SSSSS    T    IIIII    T     UUU     T   EEEEE %
%                                                                             %
%                                                                             %
%                  MagickCore Methods to Constitute an Image                  %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                October 1998                                 %
%                                                                             %
%                                                                             %
%  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/

/*
  Include declarations.
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. 
For example, to % create a 640x480 image from unsigned red-green-blue character data, use: % % image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception); % % The format of the ConstituteImage method is: % % Image *ConstituteImage(const size_t columns,const size_t rows, % const char *map,const StorageType storage,const void *pixels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o columns: width in pixels of the image. % % o rows: height in pixels of the image. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose % from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConstituteImage(const size_t columns,const size_t rows, const char *map,const StorageType storage,const void *pixels, ExceptionInfo *exception) { Image *image; MagickBooleanType status; register ssize_t i; /* Allocate image structure. 
*/ assert(map != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map); assert(pixels != (void *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage((ImageInfo *) NULL,exception); if (image == (Image *) NULL) return((Image *) NULL); for (i=0; i < (ssize_t) strlen(map); i++) { switch (map[i]) { case 'a': case 'A': case 'O': case 'o': { image->alpha_trait=BlendPixelTrait; break; } case 'C': case 'c': case 'm': case 'M': case 'Y': case 'y': case 'K': case 'k': { image->colorspace=CMYKColorspace; break; } case 'I': case 'i': { image->colorspace=GRAYColorspace; break; } default: { if (strlen(map) == 1) image->colorspace=GRAYColorspace; break; } } } status=SetImageExtent(image,columns,rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImage() returns all the properties of an image or image sequence % except for the pixels. It is much faster and consumes far less memory % than ReadImage(). On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the PingImage method is: % % Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Ping the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static size_t PingStream(const Image *magick_unused(image), const void *magick_unused(pixels),const size_t columns) { magick_unreferenced(image); magick_unreferenced(pixels); return(columns); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport Image *PingImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; ImageInfo *ping_info; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); ping_info=CloneImageInfo(image_info); ping_info->ping=MagickTrue; image=ReadStream(ping_info,&PingStream,exception); if (image != (Image *) NULL) { ResetTimer(&image->timer); if (ping_info->verbose != MagickFalse) (void) IdentifyImage(image,stdout,MagickFalse,exception); } ping_info=DestroyImageInfo(ping_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImages() pings one or more images and returns them as an image list. % % The format of the PingImage method is: % % Image *PingImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PingImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char ping_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Ping image list from a file. 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename, (int) image_info->scene,ping_filename,exception); if (LocaleCompare(ping_filename,image_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. */ read_info=CloneImageInfo(image_info); sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes == 0) { read_info=DestroyImageInfo(read_info); return(PingImage(image_info,exception)); } (void) CopyMagickString(ping_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename, (int) scene,read_info->filename,exception); image=PingImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } return(PingImage(image_info,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImage() reads an image or image sequence from a file or file handle. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. 
% % The format of the ReadImage method is: % % Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Read the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType IsCoderAuthorized(const char *coder, const PolicyRights rights,ExceptionInfo *exception) { if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",coder); return(MagickFalse); } return(MagickTrue); } MagickExport Image *ReadImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent], magick[MagickPathExtent], magick_filename[MagickPathExtent]; const char *value; const DelegateInfo *delegate_info; const MagickInfo *magick_info; DecodeImageHandler *decoder; ExceptionInfo *sans_exception; GeometryInfo geometry_info; Image *image, *next; ImageInfo *read_info; MagickBooleanType status; MagickStatusType flags; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image_info->filename != (char *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent); (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) CopyMagickString(magick,read_info->magick,MagickPathExtent); /* Call appropriate image reader based on image type. 
*/ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. */ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. 
*/ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. */ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); 
read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if (IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones == (Image *) NULL) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "SubimageSpecificationReturnsNoImages","`%s'",read_info->filename); else { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickPathExtent]; const char *option; const StringInfo *profile; ssize_t option_type; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if (*magick_path == '\0' && *next->magick == '\0') (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, 
MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; value=GetImageProperty(next,"tiff:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"exif:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) DeleteImageProperty(next,"tiff:ResolutionUnit"); } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height 
== 0) next->page.height=next->rows; option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"8bim"); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, 
MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads one or more images and returns them as an image list. % % The format of the ReadImage method is: % % Image *ReadImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char read_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Read image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; (void) SetImageOption(read_info,"filename",filename); (void) CopyMagickString(read_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(read_info,(Image *) NULL,filename, (int) read_info->scene,read_filename,exception); if (LocaleCompare(read_filename,read_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. */ sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes != 0) { (void) CopyMagickString(read_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); scene=(ssize_t) read_info->scene; for ( ; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL, read_filename,(int) scene,read_info->filename,exception); image=ReadImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } } (void) CopyMagickString(read_info->filename,filename,MagickPathExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d I n l i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
ReadInlineImage() reads a Base64-encoded inline image or image sequence. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the ReadInlineImage method is: % % Image *ReadInlineImage(const ImageInfo *image_info,const char *content, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o content: the image encoded in Base64. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReadInlineImage(const ImageInfo *image_info, const char *content,ExceptionInfo *exception) { Image *image; ImageInfo *read_info; unsigned char *blob; size_t length; register const char *p; /* Skip over header (e.g. data:image/gif;base64,). */ image=NewImageList(); for (p=content; (*p != ',') && (*p != '\0'); p++) ; if (*p == '\0') ThrowReaderException(CorruptImageError,"CorruptImage"); p++; length=0; blob=Base64Decode(p,&length); if (length == 0) { blob=(unsigned char *) RelinquishMagickMemory(blob); ThrowReaderException(CorruptImageError,"CorruptImage"); } read_info=CloneImageInfo(image_info); (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL, (void *) NULL); *read_info->filename='\0'; *read_info->magick='\0'; image=BlobToImage(read_info,blob,length,exception); blob=(unsigned char *) RelinquishMagickMemory(blob); read_info=DestroyImageInfo(read_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteImage() writes an image or an image sequence to a file or file handle. % If writing to a file is on disk, the name is defined by the filename member % of the image structure. 
WriteImage() returns MagickFalse if there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Work on a clone of image_info so the caller's settings are untouched;
    probing for the format uses a throw-away exception (sans_exception) so
    probe failures do not pollute the caller's exception.
  */
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /*
    Save the original filename so it can be restored before returning.
  */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /*
              Detect host byte order: store 1 in an unsigned long and inspect
              its first byte (1 on little-endian hosts).
            */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
         }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate: the untainted single-frame
            source file is handed directly to the delegate program.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename,image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder; divert the
                write to a unique temporary file and copy it back later
                (see the temporary handling at the end of this function).
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.  The coder's
        semaphore is held while it runs when the encoder reports no thread
        support; the security policy is consulted before invoking it.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder and no delegate for the requested format: fall back to
            the image's own format, then the filename extension, before
            finally reporting a missing-delegate error.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent: the encoder wrote to the
        unique temporary acquired above because the destination stream was
        not seekable.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImages() writes an image sequence into one or more files.  While
%  WriteImage() can write an image sequence, it is limited to writing
%  the sequence into a single file using a format which supports multiple
%  frames.
WriteImages(), however, does not have this limitation, instead it
%  generates multiple output files if necessary (or when requested).  When
%  ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
%  to include a printf-style formatting string for the frame number (e.g.
%  "image%02d.png").
%
%  The format of the WriteImages method is:
%
%      MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o images: the image list.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag  "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  register Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Work on a clone of image_info; when a filename is supplied it is stamped
    onto every frame in the list before the format is determined.
  */
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  /*
    Probe the output format with a throw-away exception so probe failures do
    not pollute the caller's exception.
  */
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,
      MagickPathExtent);
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    register Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        register ssize_t
          i;

        /*
          Generate consistent scene numbers: the first out-of-order pair
          triggers a sequential renumbering of the entire list starting from
          the first frame's scene.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.  status accumulates per-frame results with &=, so any
    failed frame makes the final return MagickFalse.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /*
      Suspend the per-frame monitor during the nested WriteImage() call so
      sequence-level progress is reported here instead; restore it after.
    */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /*
      With adjoin set, the first WriteImage() call wrote the whole sequence
      into a single file, so stop after the first frame.
    */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
ast-dump-openmp-target-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } /* NOTE(review): the FileCheck expectations below appear machine-generated from the -ast-dump output; their line:/col: references depend on the exact source layout above, so do not reformat or insert lines before this point without regenerating them. */ // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:4:1, col:24> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | 
| | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | 
|-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:10:1, col:24> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral 
{{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | 
| |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:17:1, col:36> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | 
`-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator 
{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | 
| |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:24:1, col:36> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | 
`-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator 
{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | 
| |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetSimdDirective {{.*}} <line:31:1, col:36> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:25, col:35> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:34> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:34> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 
'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} 
<col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | 
`-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | 
`-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} 
<col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
GB_binop__land_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): hand edits here will be lost on regeneration; change the
// Generator template instead.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__land_int16)
// A.*B function (eWiseMult):     GB (_AemultB_08__land_int16)
// A.*B function (eWiseMult):     GB (_AemultB_02__land_int16)
// A.*B function (eWiseMult):     GB (_AemultB_04__land_int16)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__land_int16)
// A*D function (colscale):       GB (_AxD__land_int16)
// D*A function (rowscale):       GB (_DxB__land_int16)
// C+=B function (dense accum):   GB (_Cdense_accumB__land_int16)
// C+=b function (dense accum):   GB (_Cdense_accumb__land_int16)
// C+=A+B function (dense ewise3):GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int16)
// C=scalar+B                     GB (_bind1st__land_int16)
// C=scalar+B'                    GB (_bind1st_tran__land_int16)
// C=A+scalar                     GB (_bind2nd__land_int16)
// C=A'+scalar                    GB (_bind2nd_tran__land_int16)

// C type:     int16_t
// A type:     int16_t
// A pattern?  0
// B type:     int16_t
// B pattern?  0

// BinaryOp:   cij = ((aij != 0) && (bij != 0))
// (LAND: logical AND of the two int16 inputs, result 0 or 1 as int16_t)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_INT16 || GxB_NO_LAND_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// This variant does not exist for LAND (compiled out below): the accumulating
// dense ewise3 kernel is only generated for the ops listed in the comment.

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept as
    // generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion uses alpha/beta in place of missing entries of A and B
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LAND is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__land_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its file-wide definition
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) && (y != 0)) ; \
}

GrB_Info GB (_bind2nd_tran__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H

#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"

namespace clang {

//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//

/// \brief This is a basic class for representing single OpenMP clause.
///
class OMPClause {
  /// \brief Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the clause.
  SourceLocation EndLoc;
  /// \brief Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// \brief Returns the starting location of the clause.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns the ending location of the clause.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// \brief Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// \brief Returns true for implicit clauses (those with no source
  /// location, i.e. synthesized by semantic analysis rather than written
  /// by the user).
  bool isImplicit() const { return StartLoc.isInvalid(); }

  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;
  typedef llvm::iterator_range<child_iterator> child_range;
  typedef llvm::iterator_range<const_child_iterator> const_child_range;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *) { return true; }
};

/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Pre-initialization statement for the clause.
  Stmt *PreInit;

protected:
  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S) { PreInit = S; }
  OMPClauseWithPreInit(const OMPClause *This) : PreInit(nullptr) {
    // Sanity check: only clause kinds registered in get() may carry a
    // pre-init statement.
    assert(get(This) && "get is not tuned for pre-init.");
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }
  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }
  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};

/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Post-update expression for the clause.
  Expr *PostUpdate;

protected:
  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
  OMPClauseWithPostUpdate(const OMPClause *This)
      : OMPClauseWithPreInit(This), PostUpdate(nullptr) {
    assert(get(This) && "get is not tuned for post-update.");
  }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }
  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }
  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};

/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
/// The variable list is stored as trailing objects of the derived class \a T
/// (CRTP), so this base only records the count and the '(' location.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of variables in the list.
  unsigned NumVars;

protected:
  /// \brief Fetches list of variables associated with this clause.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }

  /// \brief Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

  /// \brief Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

public:
  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};

/// \brief This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
///
class OMPIfClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Condition of the 'if' clause.
  Stmt *Condition;
  /// \brief Location of ':' (if any).
  SourceLocation ColonLoc;
  /// \brief Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier;
  /// \brief Name modifier location.
  SourceLocation NameModifierLoc;

  /// \brief Set condition.
  ///
  void setCondition(Expr *Cond) { Condition = Cond; }
  /// \brief Set directive name modifier for the clause.
  ///
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }
  /// \brief Set location of directive name modifier for the clause.
  ///
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }
  /// \brief Set location of ':'.
  ///
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond,
              SourceLocation StartLoc, SourceLocation LParenLoc,
              SourceLocation NameModifierLoc, SourceLocation ColonLoc,
              SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier),
        NameModifierLoc(NameModifierLoc) {}

  /// \brief Build an empty clause.
  ///
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), LParenLoc(),
        Condition(nullptr), ColonLoc(), NameModifier(OMPD_unknown),
        NameModifierLoc() {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
  /// \brief Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }
  /// \brief Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
  child_range children() { return child_range(&Condition, &Condition + 1); }
};

/// \brief This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
///
class OMPFinalClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Condition of the 'final' clause.
  Stmt *Condition;

  /// \brief Set condition.
  ///
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// \brief Build 'final' clause with condition \a Cond.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Cond Condition of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond) {}
  /// \brief Build an empty clause.
  ///
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Condition(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
  child_range children() { return child_range(&Condition, &Condition + 1); }
};

/// \brief This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
///
class OMPNumThreadsClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of threads specified by the 'num_threads' clause.
  Stmt *NumThreads;

  /// \brief Set number of threads.
  ///
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// \brief Build 'num_threads' clause with expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc,
                      SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {}
  /// \brief Build an empty clause.
  ///
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumThreads(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
};

/// \brief This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
///
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Safe iteration space distance.
  Stmt *Safelen;

  /// \brief Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// \brief Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}
  /// \brief Build an empty clause.
  ///
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Safelen(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
  child_range children() { return child_range(&Safelen, &Safelen + 1); }
};

/// \brief This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
///
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen;

  /// \brief Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// \brief Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}
  /// \brief Build an empty clause.
  ///
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Simdlen(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return the preferred number of iterations to be executed
  /// concurrently.
Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_simdlen; } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } }; /// \brief This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. /// class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPCollapseClause() : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. 
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_collapse; } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. /// class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'default' clause. OpenMPDefaultClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clauses. /// /// \param K Argument of clause. /// void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; } /// \brief Set argument location. /// /// \param KLoc Argument location. /// void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. /// OMPDefaultClause() : OMPClause(OMPC_default, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. 
SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPDefaultClauseKind getDefaultKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_default; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. /// class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'proc_bind' clause. OpenMPProcBindClauseKind Kind; /// \brief Start location of the kind in source code. SourceLocation KindKwLoc; /// \brief Set kind of the clause. /// /// \param K Kind of clause. /// void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; } /// \brief Set clause kind location. /// /// \param KLoc Kind location. /// void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// \brief Build an empty clause. 
/// OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown), KindKwLoc(SourceLocation()) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Returns kind of the clause. OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// \brief Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. /// class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind; /// \brief Modifiers for 'schedule' clause. enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// \brief Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// \brief Start location of the schedule ind in source code. SourceLocation KindLoc; /// \brief Location of ',' (if any). SourceLocation CommaLoc; /// \brief Chunk size. Expr *ChunkSize; /// \brief Set schedule kind. /// /// \param K Schedule kind. /// void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// \brief Set the first schedule modifier. /// /// \param M Schedule modifier. 
/// void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// \brief Set the second schedule modifier. /// /// \param M Schedule modifier. /// void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// \brief Set location of the first schedule modifier. /// void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// \brief Set location of the second schedule modifier. /// void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// \brief Set schedule modifier location. /// /// \param M Schedule modifier location. /// void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// \brief Sets the location of '('. /// /// \param Loc Location of '('. /// void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Set schedule kind start location. /// /// \param KLoc Schedule kind location. /// void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// \brief Set location of ','. /// /// \param Loc Location of ','. /// void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// \brief Set chunk size. /// /// \param E Chunk size. /// void setChunkSize(Expr *E) { ChunkSize = E; } public: /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. 
/// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier /// OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// \brief Build an empty clause. /// explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this), Kind(OMPC_SCHEDULE_unknown), ChunkSize(nullptr) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// \brief Get kind of the clause. /// OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// \brief Get the first modifier of the clause. /// OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// \brief Get the second modifier of the clause. /// OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// \brief Get location of '('. /// SourceLocation getLParenLoc() { return LParenLoc; } /// \brief Get kind location. /// SourceLocation getScheduleKindLoc() { return KindLoc; } /// \brief Get the first modifier location. /// SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// \brief Get the second modifier location. /// SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// \brief Get location of ','. 
/// SourceLocation getCommaLoc() { return CommaLoc; } /// \brief Get chunk size. /// Expr *getChunkSize() { return ChunkSize; } /// \brief Get chunk size. /// const Expr *getChunkSize() const { return ChunkSize; } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } }; /// \brief This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. /// class OMPOrderedClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Number of for-loops. Stmt *NumForLoops; /// \brief Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// \brief Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPOrderedClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// \brief Build an empty clause. /// explicit OMPOrderedClause() : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), NumForLoops(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return the number of associated for-loops. 
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } }; /// \brief This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. /// class OMPNowaitClause : public OMPClause { public: /// \brief Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. /// class OMPUntiedClause : public OMPClause { public: /// \brief Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'mergeable' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. /// class OMPMergeableClause : public OMPClause { public: /// \brief Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. /// class OMPReadClause : public OMPClause { public: /// \brief Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. /// class OMPWriteClause : public OMPClause { public: /// \brief Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
/// OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// class OMPUpdateClause : public OMPClause { public: /// \brief Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. /// class OMPCaptureClause : public OMPClause { public: /// \brief Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. /// class OMPSeqCstClause : public OMPClause { public: /// \brief Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. /// class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
/// OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// \brief Build an empty clause. /// /// \param N Number of variables. /// explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. /// static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// \brief This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. /// class OMPFirstprivateClause final : public OMPVarListClause<OMPFirstprivateClause>, public OMPClauseWithPreInit, private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> { friend TrailingObjects; friend OMPVarListClause; friend class OMPClauseReader; /// \brief Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// \brief Build an empty clause. /// /// \param N Number of variables. 
/// explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// \brief Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// \brief Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// \brief Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// \brief Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// \brief Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. 
/// static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// \brief Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); typedef MutableArrayRef<Expr *>::iterator private_copies_iterator; typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator; typedef llvm::iterator_range<private_copies_iterator> private_copies_range; typedef llvm::iterator_range<private_copies_const_iterator> private_copies_const_range; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } typedef MutableArrayRef<Expr *>::iterator inits_iterator; typedef ArrayRef<const Expr *>::iterator inits_const_iterator; typedef llvm::iterator_range<inits_iterator> inits_range; typedef llvm::iterator_range<inits_const_iterator> inits_const_range; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// \brief This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. 
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  //
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  // The four helper arrays (see class comment) are laid out contiguously
  // after the variable list; each getter chains off the previous one's end().
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  /// \brief Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  // Children are the listed variables themselves (not the helper arrays).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};

/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
///
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_shared;
  }
};

/// \brief This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
///
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Location of ':'.
  SourceLocation ColonLoc;
  /// \brief Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;
  /// \brief Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  ///
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this), ColonLoc(), QualifierLoc(),
        NameInfo() {}

  /// \brief Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
  /// \brief Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
  /// \brief Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// \brief Get the list of helper privates.
  // Tail-allocated layout: Vars[]; Privates[]; LHSExprs[]; RHSExprs[];
  // ReductionOps[]. Each getter chains off the previous array's end().
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// \brief Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// \brief Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// \brief Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// \brief Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
  /// \brief Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // Children are the listed variables themselves (not the helper arrays).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};

/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
///
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier;
  /// \brief Location of linear modifier if any.
  SourceLocation ModifierLoc;
  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the linear step for clause.
  // Step lives in the first of the 2 helper slots that follow Finals[]
  // (see the layout comment on getPrivates() below).
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// \brief Sets the expression to calculate linear step for clause.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// \brief Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
                                          EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          NumVars),
        OMPClauseWithPostUpdate(this), Modifier(OMPC_LINEAR_val), ModifierLoc(),
        ColonLoc() {}

  /// \brief Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  ///
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Sets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// \brief Sets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// \brief Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// \brief Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// \brief Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  ///
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
  /// \brief Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// \brief Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
  /// \brief Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }
  /// \brief Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }
  /// \brief Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }
  /// \brief Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// \brief Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// \brief Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  typedef MutableArrayRef<Expr *>::iterator privates_iterator;
  typedef ArrayRef<const Expr *>::iterator privates_const_iterator;
  typedef llvm::iterator_range<privates_iterator> privates_range;
  typedef llvm::iterator_range<privates_const_iterator> privates_const_range;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  typedef MutableArrayRef<Expr *>::iterator inits_iterator;
  typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
  typedef llvm::iterator_range<inits_iterator> inits_range;
  typedef llvm::iterator_range<inits_const_iterator> inits_const_range;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  typedef MutableArrayRef<Expr *>::iterator updates_iterator;
  typedef ArrayRef<const Expr *>::iterator updates_const_iterator;
  typedef llvm::iterator_range<updates_iterator> updates_range;
  typedef llvm::iterator_range<updates_const_iterator> updates_const_range;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  typedef MutableArrayRef<Expr *>::iterator finals_iterator;
  typedef ArrayRef<const Expr *>::iterator finals_const_iterator;
  typedef llvm::iterator_range<finals_iterator> finals_range;
  typedef llvm::iterator_range<finals_const_iterator> finals_const_range;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  // Children are the listed variables themselves (not the helper arrays).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_linear;
  }
};

/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
///
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the alignment for clause.
  // The single alignment expression is tail-allocated right after the varlist.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// \brief Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of variables.
/// explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), ColonLoc(SourceLocation()) {} public: /// \brief Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// \brief Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. /// static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// \brief Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// \brief Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// \brief Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// \brief Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_aligned; } }; /// \brief This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. /// class OMPCopyinClause final : public OMPVarListClause<OMPCopyinClause>, private llvm::TrailingObjects<OMPCopyinClause, Expr *> { // Class has 3 additional tail allocated arrays: // 1. 
// List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.
// 2. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents destinations.
// 3. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of propagation of master's thread values of
// threadprivate variables to local instances of that variables in other
// implicit threads.
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                        EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPCopyinClause(unsigned N)
    : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                        SourceLocation(), SourceLocation(),
                                        N) {}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
// Sources are the first tail-allocated helper list, stored directly after
// the variable list.
MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
// Destinations follow the sources in the trailing storage.
MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
// Assignment ops follow the destinations in the trailing storage.
MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}

public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of master's thread values of
/// threadprivate variables to local instances of that variables in other
/// implicit threads.
///
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
       ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
typedef llvm::iterator_range<helper_expr_const_iterator>
    helper_expr_const_range;
helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
}
helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
}
child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
    return
T->getClauseKind() == OMPC_copyin;
}
};
/// \brief This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
///
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                             LParenLoc, EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPCopyprivateClause(unsigned N)
    : OMPVarListClause<OMPCopyprivateClause>(
          OMPC_copyprivate, SourceLocation(), SourceLocation(),
          SourceLocation(), N) {}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
// Sources are the first tail-allocated helper list, stored directly after
// the variable list.
MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
// Destinations follow the sources in the trailing storage.
MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
// Assignment ops follow the destinations in the trailing storage.
MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}

public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
///
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
       ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
typedef llvm::iterator_range<helper_expr_const_iterator>
    helper_expr_const_range;
helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
}
helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
}
child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
}
};
/// \brief This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
///
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
               SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                       EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPFlushClause(unsigned N)
    : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                       SourceLocation(), SourceLocation(),
                                       N) {}

public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
///
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                              SourceLocation LParenLoc, SourceLocation EndLoc,
                              ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
}
};
/// \brief This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
///
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind;
/// \brief Dependency type location.
SourceLocation DepLoc;
/// \brief Colon location.
SourceLocation ColonLoc;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
///
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                        EndLoc, N),
      DepKind(OMPC_DEPEND_unknown) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
///
explicit OMPDependClause(unsigned N)
    : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                        SourceLocation(), SourceLocation(),
                                        N),
      DepKind(OMPC_DEPEND_unknown) {}
/// \brief Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// \brief Set dependency type location. (Only the location is stored here;
/// the kind itself is set via setDependencyKind.)
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// \brief Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
static OMPDependClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
       SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
///
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);
/// \brief Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// \brief Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// \brief Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Set the loop counter value for the depend clauses with 'sink|source' kind
/// of dependency. Required for codegen.
void setCounterValue(Expr *V);
/// Get the loop counter value.
Expr *getCounterValue();
/// Get the loop counter value.
const Expr *getCounterValue() const;
child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
}
};
/// \brief This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
/// class OMPDeviceClause : public OMPClause { friend class OMPClauseReader; /// \brief Location of '('. SourceLocation LParenLoc; /// \brief Device number. Stmt *Device; /// \brief Set the device number. /// /// \param E Device number. /// void setDevice(Expr *E) { Device = E; } public: /// \brief Build 'device' clause. /// /// \param E Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// OMPDeviceClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_device, StartLoc, EndLoc), LParenLoc(LParenLoc), Device(E) {} /// \brief Build an empty clause. /// OMPDeviceClause() : OMPClause(OMPC_device, SourceLocation(), SourceLocation()), LParenLoc(SourceLocation()), Device(nullptr) {} /// \brief Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// \brief Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// \brief Return device number. Expr *getDevice() { return cast<Expr>(Device); } /// \brief Return device number. Expr *getDevice() const { return cast<Expr>(Device); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_device; } child_range children() { return child_range(&Device, &Device + 1); } }; /// \brief This represents 'threads' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered threads /// \endcode /// In this example directive '#pragma omp ordered' has simple 'threads' clause. /// class OMPThreadsClause : public OMPClause { public: /// \brief Build 'threads' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_threads, StartLoc, EndLoc) {} /// \brief Build an empty clause. 
/// OMPThreadsClause() : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_threads; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This represents 'simd' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered simd /// \endcode /// In this example directive '#pragma omp ordered' has simple 'simd' clause. /// class OMPSIMDClause : public OMPClause { public: /// \brief Build 'simd' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. /// OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_simd, StartLoc, EndLoc) {} /// \brief Build an empty clause. /// OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {} static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_simd; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief Struct that defines common infrastructure to handle mappable /// expressions used in OpenMP clauses. class OMPClauseMappableExprCommon { public: // \brief Class that represents a component of a mappable expression. E.g. // for an expression S.a, the first component is a declaration reference // expression associated with 'S' and the second is a member expression // associated with the field declaration 'a'. If the expression is an array // subscript it may not have any associated declaration. In that case the // associated declaration is set to nullptr. class MappableComponent { // \brief Expression associated with the component. Expr *AssociatedExpression = nullptr; // \brief Declaration associated with the declaration. If the component does // not have a declaration (e.g. array subscripts or section), this is set to // nullptr. 
ValueDecl *AssociatedDeclaration = nullptr; public: explicit MappableComponent() {} explicit MappableComponent(Expr *AssociatedExpression, ValueDecl *AssociatedDeclaration) : AssociatedExpression(AssociatedExpression), AssociatedDeclaration( AssociatedDeclaration ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl()) : nullptr) {} Expr *getAssociatedExpression() const { return AssociatedExpression; } ValueDecl *getAssociatedDeclaration() const { return AssociatedDeclaration; } }; // \brief List of components of an expression. This first one is the whole // expression and the last one is the base expression. typedef SmallVector<MappableComponent, 8> MappableExprComponentList; typedef ArrayRef<MappableComponent> MappableExprComponentListRef; // \brief List of all component lists associated to the same base declaration. // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have // their component list but the same base declaration 'S'. typedef SmallVector<MappableExprComponentList, 8> MappableExprComponentLists; typedef ArrayRef<MappableExprComponentList> MappableExprComponentListsRef; protected: // \brief Return the total number of elements in a list of component lists. static unsigned getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists); // \brief Return the total number of elements in a list of declarations. All // declarations are expected to be canonical. static unsigned getUniqueDeclarationsTotalNumber(ArrayRef<ValueDecl *> Declarations); }; /// \brief This represents clauses with a list of expressions that are mappable. /// Examples of these clauses are 'map' in /// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from /// in '#pragma omp target update...' directives. template <class T> class OMPMappableExprListClause : public OMPVarListClause<T>, public OMPClauseMappableExprCommon { friend class OMPClauseReader; /// \brief Number of unique declarations in this clause. 
unsigned NumUniqueDeclarations; /// \brief Number of component lists in this clause. unsigned NumComponentLists; /// \brief Total number of components in this clause. unsigned NumComponents; protected: /// \brief Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// \brief Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// \brief Set the unique declarations that are in the trailing objects of the /// class. void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// \brief Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// \brief Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// \brief Set the number of lists per declaration that are in the trailing /// objects of the class. void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// \brief Get the cumulative component lists sizes that are in the trailing /// objects of the class. 
They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// \brief Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// \brief Set the cumulative component lists sizes that are in the trailing /// objects of the class. void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// \brief Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// \brief Get the components that are in the trailing objects of the class. ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// \brief Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. 
void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// \brief Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. llvm::DenseMap<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. 
auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. CI = std::copy(C.begin(), C.end(), CI); } } } /// \brief Build a clause for \a NumUniqueDeclarations declarations, \a /// NumComponentLists total component lists, and \a NumComponents total /// components. /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of expressions listed in the clause. /// \param NumUniqueDeclarations Number of unique base declarations in this /// clause. /// \param NumComponentLists Number of component lists in this clause - one /// list for each expression in the clause. /// \param NumComponents Total number of expression components in the clause. 
///
OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc,
                          unsigned NumVars, unsigned NumUniqueDeclarations,
                          unsigned NumComponentLists, unsigned NumComponents)
    : OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
      NumUniqueDeclarations(NumUniqueDeclarations),
      NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}

public:
/// \brief Return the number of unique base declarations in this clause.
unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
/// \brief Return the number of lists derived from the clause expressions.
unsigned getTotalComponentListNum() const { return NumComponentLists; }
/// \brief Return the total number of components in all lists derived from the
/// clause.
unsigned getTotalComponentsNum() const { return NumComponents; }

/// \brief Iterator that browse the components by lists. It also allows
/// browsing components of a single declaration.
class const_component_lists_iterator
    : public llvm::iterator_adaptor_base<
          const_component_lists_iterator,
          MappableExprComponentListRef::const_iterator,
          std::forward_iterator_tag, MappableComponent, ptrdiff_t,
          MappableComponent, MappableComponent> {
  // The declaration the iterator currently refers to.
  ArrayRef<ValueDecl *>::iterator DeclCur;

  // The list number associated with the current declaration.
  ArrayRef<unsigned>::iterator NumListsCur;

  // Remaining lists for the current declaration.
  unsigned RemainingLists;

  // The cumulative size of the previous list, or zero if there is no previous
  // list.
  unsigned PrevListSize;

  // The cumulative sizes of the current list - it will delimit the remaining
  // range of interest.
  ArrayRef<unsigned>::const_iterator ListSizeCur;
  ArrayRef<unsigned>::const_iterator ListSizeEnd;

  // Iterator to the end of the components storage.
  MappableExprComponentListRef::const_iterator End;

public:
  /// \brief Construct an iterator that scans all lists.
  explicit const_component_lists_iterator(
      ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
      ArrayRef<unsigned> CumulativeListSizes,
      MappableExprComponentListRef Components)
      : const_component_lists_iterator::iterator_adaptor_base(
            Components.begin()),
        DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
        RemainingLists(0u), PrevListSize(0u),
        ListSizeCur(CumulativeListSizes.begin()),
        ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
    assert(UniqueDecls.size() == DeclsListNum.size() &&
           "Inconsistent number of declarations and list sizes!");
    if (!DeclsListNum.empty())
      RemainingLists = *NumListsCur;
  }

  /// \brief Construct an iterator that scan lists for a given declaration \a
  /// Declaration.
  explicit const_component_lists_iterator(
      const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
      ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
      MappableExprComponentListRef Components)
      : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                       CumulativeListSizes, Components) {
    // Look for the desired declaration. While we are looking for it, we
    // update the state so that we know the component where a given list
    // starts.
    for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
      if (*DeclCur == Declaration)
        break;

      assert(*NumListsCur > 0 && "No lists associated with declaration??");

      // Skip the lists associated with the current declaration, but save the
      // last list size that was skipped.
      std::advance(ListSizeCur, *NumListsCur - 1);
      PrevListSize = *ListSizeCur;
      ++ListSizeCur;
    }

    // If we didn't find any declaration, advance the iterator to after the
    // last component and set remaining lists to zero.
    if (ListSizeCur == CumulativeListSizes.end()) {
      this->I = End;
      RemainingLists = 0u;
      return;
    }

    // Set the remaining lists with the total number of lists of the current
    // declaration.
    RemainingLists = *NumListsCur;

    // Adjust the list size end iterator to the end of the relevant range.
    ListSizeEnd = ListSizeCur;
    std::advance(ListSizeEnd, RemainingLists);

    // Given that the list sizes are cumulative, the index of the component
    // that start the list is the size of the previous list.
    std::advance(this->I, PrevListSize);
  }

  // Return the array with the current list. The sizes are cumulative, so the
  // array size is the difference between the current size and previous one.
  std::pair<const ValueDecl *, MappableExprComponentListRef>
  operator*() const {
    assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
    return std::make_pair(
        *DeclCur,
        MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
  }
  std::pair<const ValueDecl *, MappableExprComponentListRef>
  operator->() const {
    return **this;
  }

  // Skip the components of the current list.
  const_component_lists_iterator &operator++() {
    assert(ListSizeCur != ListSizeEnd && RemainingLists &&
           "Invalid iterator!");

    // If we don't have more lists just skip all the components. Otherwise,
    // advance the iterator by the number of components in the current list.
    if (std::next(ListSizeCur) == ListSizeEnd) {
      this->I = End;
      RemainingLists = 0;
    } else {
      std::advance(this->I, *ListSizeCur - PrevListSize);
      PrevListSize = *ListSizeCur;

      // We are done with a declaration, move to the next one.
      if (!(--RemainingLists)) {
        ++DeclCur;
        ++NumListsCur;
        RemainingLists = *NumListsCur;
        assert(RemainingLists && "No lists in the following declaration??");
      }
    }
    ++ListSizeCur;
    return *this;
  }
};

typedef llvm::iterator_range<const_component_lists_iterator>
    const_component_lists_range;

/// \brief Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
  return const_component_lists_iterator(
      getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
      getComponentsRef());
}
const_component_lists_iterator component_lists_end() const {
  return const_component_lists_iterator(
      ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
      MappableExprComponentListRef(getComponentsRef().end(),
                                   getComponentsRef().end()));
}
const_component_lists_range component_lists() const {
  return {component_lists_begin(), component_lists_end()};
}

/// \brief Iterators for component lists associated with the provided
/// declaration.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
  return const_component_lists_iterator(
      VD, getUniqueDeclsRef(), getDeclNumListsRef(),
      getComponentListSizesRef(), getComponentsRef());
}
const_component_lists_iterator decl_component_lists_end() const {
  return component_lists_end();
}
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
  return {decl_component_lists_begin(VD), decl_component_lists_end()};
}

/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
typedef ArrayRef<ValueDecl *>::iterator const_all_decls_iterator;
typedef llvm::iterator_range<const_all_decls_iterator> const_all_decls_range;
const_all_decls_range all_decls() const {
  auto A = getUniqueDeclsRef();
  return const_all_decls_range(A.begin(), A.end());
}

typedef ArrayRef<unsigned>::iterator const_all_num_lists_iterator;
typedef llvm::iterator_range<const_all_num_lists_iterator>
    const_all_num_lists_range;
const_all_num_lists_range all_num_lists() const {
  auto A = getDeclNumListsRef();
  return const_all_num_lists_range(A.begin(), A.end());
}

typedef ArrayRef<unsigned>::iterator const_all_lists_sizes_iterator;
typedef llvm::iterator_range<const_all_lists_sizes_iterator>
    const_all_lists_sizes_range;
const_all_lists_sizes_range all_lists_sizes() const {
  auto A = getComponentListSizesRef();
  return const_all_lists_sizes_range(A.begin(), A.end());
}

typedef ArrayRef<MappableComponent>::iterator const_all_components_iterator;
typedef llvm::iterator_range<const_all_components_iterator>
    const_all_components_range;
const_all_components_range all_components() const {
  auto A = getComponentsRef();
  return const_all_components_range(A.begin(), A.end());
}
};

/// \brief This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
///
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
  return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
  return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
  return getUniqueDeclarationsNum() + getTotalComponentListNum();
}

/// \brief Map type modifier for the 'map' clause.
OpenMPMapClauseKind MapTypeModifier;
/// \brief Map type for the 'map' clause.
OpenMPMapClauseKind MapType;
/// \brief Is this an implicit map type or not.
bool MapTypeIsImplicit;
/// \brief Location of the map type.
SourceLocation MapLoc;
/// \brief Colon location.
SourceLocation ColonLoc;

/// \brief Set type modifier for the clause.
///
/// \param T Type Modifier for the clause.
///
void setMapTypeModifier(OpenMPMapClauseKind T) { MapTypeModifier = T; }

/// \brief Set type for the clause.
///
/// \param T Type for the clause.
///
void setMapType(OpenMPMapClauseKind T) { MapType = T; }

/// \brief Set type location.
///
/// \param TLoc Type location.
///
void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

/// \brief Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

/// \brief Build a clause for \a NumVars listed expressions, \a
/// NumUniqueDeclarations declarations, \a NumComponentLists total component
/// lists, and \a NumComponents total expression components.
///
/// \param MapTypeModifier Map type modifier.
/// \param MapType Map type.
/// \param MapTypeIsImplicit Map type is inferred implicitly.
/// \param MapLoc Location of the map type.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPMapClause(OpenMPMapClauseKind MapTypeModifier,
                      OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                      SourceLocation MapLoc, SourceLocation StartLoc,
                      SourceLocation LParenLoc, SourceLocation EndLoc,
                      unsigned NumVars, unsigned NumUniqueDeclarations,
                      unsigned NumComponentLists, unsigned NumComponents)
    : OMPMappableExprListClause(OMPC_map, StartLoc, LParenLoc, EndLoc,
                                NumVars, NumUniqueDeclarations,
                                NumComponentLists, NumComponents),
      MapTypeModifier(MapTypeModifier), MapType(MapType),
      MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {}

/// \brief Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPMapClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                      unsigned NumComponentLists, unsigned NumComponents)
    : OMPMappableExprListClause(
          OMPC_map, SourceLocation(), SourceLocation(), SourceLocation(),
          NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents),
      MapTypeModifier(OMPC_MAP_unknown), MapType(OMPC_MAP_unknown),
      MapTypeIsImplicit(false), MapLoc() {}

public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param TypeModifier Map type modifier.
/// \param Type Map type.
/// \param TypeIsImplicit Map type is inferred implicitly.
/// \param TypeLoc Location of the map type.
///
static OMPMapClause *Create(const ASTContext &C, SourceLocation StartLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc,
                            ArrayRef<Expr *> Vars,
                            ArrayRef<ValueDecl *> Declarations,
                            MappableExprComponentListsRef ComponentLists,
                            OpenMPMapClauseKind TypeModifier,
                            OpenMPMapClauseKind Type, bool TypeIsImplicit,
                            SourceLocation TypeLoc);
/// \brief Creates an empty clause with the place for \a NumVars original
/// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
/// lists, and \a NumComponents expression components.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
static OMPMapClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                 unsigned NumUniqueDeclarations,
                                 unsigned NumComponentLists,
                                 unsigned NumComponents);

/// \brief Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

/// \brief Is this an implicit map type?
/// We have to capture 'IsMapTypeImplicit' from the parser for more
/// informative error messages. It helps distinguish map(r) from
/// map(tofrom: r), which is important to print more helpful error
/// messages for some target directives.
bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

/// \brief Fetches the map type modifier for the clause.
OpenMPMapClauseKind getMapTypeModifier() const LLVM_READONLY {
  return MapTypeModifier;
}

/// \brief Fetches location of clause mapping kind.
SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

/// \brief Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_map;
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
};

/// \brief This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
///
class OMPNumTeamsClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief NumTeams number.
  Stmt *NumTeams;
  /// \brief Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  ///
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// \brief Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumTeamsClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTeams(E) {}
  /// \brief Build an empty clause.
  ///
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumTeams(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }
  /// \brief Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }
  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
};

/// \brief This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
///
class OMPThreadLimitClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief ThreadLimit number.
  Stmt *ThreadLimit;
  /// \brief Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  ///
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// \brief Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPThreadLimitClause(Expr *E, SourceLocation StartLoc,
                       SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc), LParenLoc(LParenLoc),
        ThreadLimit(E) {}
  /// \brief Build an empty clause.
  ///
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), ThreadLimit(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
  /// \brief Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }
};

/// \brief This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
///
class OMPPriorityClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Priority number.
  Stmt *Priority;
  /// \brief Set the Priority number.
  ///
  /// \param E Priority number.
  ///
  void setPriority(Expr *E) { Priority = E; }

public:
  /// \brief Build 'priority' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}
  /// \brief Build an empty clause.
  ///
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Priority(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }
  /// \brief Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
  child_range children() { return child_range(&Priority, &Priority + 1); }
};

/// \brief This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
///
class OMPGrainsizeClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Grainsize expression of the clause.
  Stmt *Grainsize;
  /// \brief Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// \brief Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Grainsize(Size) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Grainsize(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the grainsize expression.
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
};

/// \brief This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
///
class OMPNogroupClause : public OMPClause {
public:
  /// \brief Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// \brief Build an empty clause.
  ///
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
///
class OMPNumTasksClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of tasks expression of the clause.
  Stmt *NumTasks;
  /// \brief Set the num_tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// \brief Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTasks(Size) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumTasks(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return the number of tasks expression.
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
};

/// \brief This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
///
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Hint expression of the 'hint' clause.
  Stmt *Hint;
  /// \brief Set hint expression.
  ///
  void setHint(Expr *H) { Hint = H; }

public:
  /// \brief Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// \brief Build an empty clause.
  ///
  OMPHintClause()
      : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Hint(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Returns the hint expression of the clause.
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }

  child_range children() { return child_range(&Hint, &Hint + 1); }
};

/// \brief This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
///
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'schedule' clause.
  OpenMPDistScheduleClauseKind Kind;
  /// \brief Start location of the schedule kind in source code.
  SourceLocation KindLoc;
  /// \brief Location of ',' (if any).
  SourceLocation CommaLoc;
  /// \brief Chunk size.
  Expr *ChunkSize;

  /// \brief Set schedule kind.
  ///
  /// \param K Schedule kind.
  ///
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }
  /// \brief Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  ///
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  ///
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
  /// \brief Set location of ','.
  ///
  /// \param Loc Location of ','.
  ///
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
  /// \brief Set chunk size.
  ///
  /// \param E Chunk size.
  ///
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// \brief Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  ///
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// \brief Build an empty clause.
  ///
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this), Kind(OMPC_DIST_SCHEDULE_unknown),
        ChunkSize(nullptr) {}

  /// \brief Get kind of the clause.
  ///
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }
  /// \brief Get location of '('.
  ///
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// \brief Get kind location.
  ///
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }
  /// \brief Get location of ','.
  ///
  SourceLocation getCommaLoc() { return CommaLoc; }
  /// \brief Get chunk size.
  ///
  Expr *getChunkSize() { return ChunkSize; }
  /// \brief Get chunk size.
  ///
  const Expr *getChunkSize() const { return ChunkSize; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }
};

/// \brief This represents 'defaultmap' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
///
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier;
  /// \brief Locations of modifiers.
  SourceLocation ModifierLoc;
  /// \brief A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind;
  /// \brief Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// \brief Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  ///
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
  /// \brief Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  ///
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }
  /// \brief Set location of the defaultmap modifier.
  ///
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
  /// \brief Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  ///
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  ///
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// \brief Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier.
  ///
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// \brief Build an empty clause.
  ///
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()),
        Modifier(OMPC_DEFAULTMAP_MODIFIER_unknown),
        Kind(OMPC_DEFAULTMAP_unknown) {}

  /// \brief Get kind of the clause.
  ///
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
  /// \brief Get the modifier of the clause.
  ///
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }
  /// \brief Get location of '('.
  ///
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// \brief Get kind location.
  ///
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }
  /// \brief Get the modifier location.
  ///
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }

  // 'defaultmap' has no expression children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
///
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// \brief Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPToClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned NumVars,
                       unsigned NumUniqueDeclarations,
                       unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_to, StartLoc, LParenLoc, EndLoc, NumVars,
                                  NumUniqueDeclarations, NumComponentLists,
                                  NumComponents) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPToClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                       unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(
            OMPC_to, SourceLocation(), SourceLocation(), SourceLocation(),
            NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}

public:
  /// \brief Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  ///
  static OMPToClause *Create(const ASTContext &C, SourceLocation StartLoc,
                             SourceLocation LParenLoc, SourceLocation EndLoc,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  static OMPToClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                  unsigned NumUniqueDeclarations,
                                  unsigned NumComponentLists,
                                  unsigned NumComponents);

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
};

/// \brief This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
///
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// \brief Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPFromClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation EndLoc, unsigned NumVars,
                         unsigned NumUniqueDeclarations,
                         unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_from, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// \brief Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPFromClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                         unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(
            OMPC_from, SourceLocation(), SourceLocation(), SourceLocation(),
            NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}

public:
  /// \brief Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  ///
  static OMPFromClause *Create(const ASTContext &C, SourceLocation StartLoc,
                               SourceLocation LParenLoc, SourceLocation EndLoc,
                               ArrayRef<Expr *> Vars,
                               ArrayRef<ValueDecl *> Declarations,
                               MappableExprComponentListsRef ComponentLists);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  static OMPFromClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                    unsigned NumUniqueDeclarations,
                                    unsigned NumComponentLists,
                                    unsigned NumComponents);

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
///
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  // NOTE: 3 * varlist_size() because the Expr* storage holds, back to back,
  // the original variables, their private copies, and the copy initializers
  // (see getPrivateCopies()/getInits() below).
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPUseDevicePtrClause(SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, unsigned NumVars,
                                 unsigned NumUniqueDeclarations,
                                 unsigned NumComponentLists,
                                 unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_use_device_ptr, StartLoc, LParenLoc,
                                  EndLoc, NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPUseDevicePtrClause(unsigned NumVars,
                                 unsigned NumUniqueDeclarations,
                                 unsigned NumComponentLists,
                                 unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_use_device_ptr, SourceLocation(),
                                  SourceLocation(), SourceLocation(), NumVars,
                                  NumUniqueDeclarations, NumComponentLists,
                                  NumComponents) {}

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  // Stored immediately after the varlist in the Expr* trailing storage.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  // Stored immediately after the private copies in the Expr* trailing storage.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  ///
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> Vars,
         ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits,
         ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  static OMPUseDevicePtrClause *CreateEmpty(const ASTContext &C,
                                            unsigned NumVars,
                                            unsigned NumUniqueDeclarations,
                                            unsigned NumComponentLists,
                                            unsigned NumComponents);

  typedef MutableArrayRef<Expr *>::iterator private_copies_iterator;
  typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator;
  typedef llvm::iterator_range<private_copies_iterator> private_copies_range;
  typedef llvm::iterator_range<private_copies_const_iterator>
      private_copies_const_range;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  typedef MutableArrayRef<Expr *>::iterator inits_iterator;
  typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
  typedef llvm::iterator_range<inits_iterator> inits_range;
  typedef llvm::iterator_range<inits_const_iterator> inits_const_range;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
///
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend OMPMappableExprListClause;
  friend class OMPClauseReader;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPIsDevicePtrClause(SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                unsigned NumVars,
                                unsigned NumUniqueDeclarations,
                                unsigned NumComponentLists,
                                unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_is_device_ptr, StartLoc, LParenLoc,
                                  EndLoc, NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  explicit OMPIsDevicePtrClause(unsigned NumVars,
                                unsigned NumUniqueDeclarations,
                                unsigned NumComponentLists,
                                unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_is_device_ptr, SourceLocation(),
                                  SourceLocation(), SourceLocation(), NumVars,
                                  NumUniqueDeclarations, NumComponentLists,
                                  NumComponents) {}

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  ///
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> Vars,
         ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  ///
  static OMPIsDevicePtrClause *CreateEmpty(const ASTContext &C,
                                           unsigned NumVars,
                                           unsigned NumUniqueDeclarations,
                                           unsigned NumComponentLists,
                                           unsigned NumComponents);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_is_device_ptr;
  }
};

} // end namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
dft.c
#include <time.h>
#include <math.h>
#include <complex.h>
#include <stdio.h>
#include <stdlib.h>

/* Number of sample points; must be a power of two for the radix-2 recursion. */
#define N (2 << 18)

/* M_PI is POSIX, not ISO C; provide a fallback for strict compilers. */
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/*
 * Store the DFT of f in F, for n points (recursive radix-2 Cooley-Tukey,
 * decimation in time).  n must be a power of two.
 *
 * NOTE: the twiddle factor uses exp(+2*pi*i*k/n), i.e. the unnormalized
 * inverse-DFT sign convention; this matches the original implementation.
 *
 * F      output array of n points
 * f      input samples, read at f[0], f[stride], f[2*stride], ...
 * n      transform length
 * stride distance between successive input samples
 */
void fft(float complex *F, float complex *f, int n, int stride) {
    if (n == 1) {
        F[0] = f[0];
    } else {
        /* Even samples -> first half of F, odd samples -> second half. */
        fft(F, f, n/2, 2*stride);
        fft(F + n/2, f + stride, n/2, 2*stride);

        /* Butterfly combine.  Each iteration touches the disjoint pair
         * (F[i], F[i+n/2]), so the loop parallelizes safely; the "if"
         * clause avoids spawning a team for tiny sub-transforms. */
        #pragma omp parallel for if (n >= 4096)
        for (int i = 0; i < n/2; i++) {
            /* Compute the twiddle factor once (the original evaluated
             * cexp twice per iteration). */
            const double complex w = cexp(2.*M_PI*I*i/n);
            const float complex t = F[i];
            const float complex b = (float complex)(w * F[i + n/2]);
            F[i]       = t + b;
            F[i + n/2] = t - b;
        }
    }
}

/* Generate a delta function initial value: input[0] = 1, all others 0. */
void delta(float complex *input) {
    for (int i = 0; i < N; i++) {
        input[i] = 0.;
    }
    input[0] = 1.;
}

/* Generate a sine function initial value (4 full periods over N samples). */
void sine(float complex *input) {
    for (int i = 0; i < N; i++) {
        input[i] = csin(8.*((float)i)*M_PI/N);
    }
}

/* Generate a sinc function initial value: sin(z)/z with z = 10*pi*i/N. */
void sinc(float complex *input) {
    /* Set the i = 0 sample to the limit of sin(z)/z as z -> 0 up front;
     * the original computed 0/0 (NaN) first and then overwrote it. */
    input[0] = 1;
    for (int i = 1; i < N; i++) {
        const double z = 10.*((float)i)*M_PI/N;
        input[i] = csin(z)/z;
    }
}

int main(int argc, char *argv[]) {
    (void)argc;
    (void)argv;

    float complex *x    = malloc(N * sizeof *x);
    float complex *ft_x = malloc(N * sizeof *ft_x);
    if (x == NULL || ft_x == NULL) {     /* the original never checked malloc */
        fprintf(stderr, "out of memory\n");
        free(x);
        free(ft_x);
        return EXIT_FAILURE;
    }

    sinc(x);

    /* The original wrapped this call in "#pragma omp parallel num_threads(2)",
     * which made every team thread execute the whole recursion redundantly
     * and race on ft_x.  Parallelism now lives inside fft's combine loop. */
    fft(ft_x, x, N, 1);

    free(x);       /* the original leaked both buffers */
    free(ft_x);
    return EXIT_SUCCESS;
}
GB_compiler.h
//------------------------------------------------------------------------------
// GB_compiler.h: handle compiler variations
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#ifndef GB_COMPILER_H
#define GB_COMPILER_H

//------------------------------------------------------------------------------
// determine which compiler is in use
//------------------------------------------------------------------------------

// Exactly one GB_COMPILER_* flag is set to 1; the #elif chain is ordered so
// that compilers which also define __GNUC__ or __clang__ (nvcc, icx, icc)
// are detected first.

#if defined ( __NVCC__ )

    // NVIDIA nvcc compiler
    #define GB_COMPILER_NVCC 1
    #define GB_COMPILER_ICX 0
    #define GB_COMPILER_ICC 0
    #define GB_COMPILER_CLANG 0
    #define GB_COMPILER_GCC 0
    #define GB_COMPILER_MSC 0
    #define GB_COMPILER_XLC 0

    #define GB_COMPILER_MAJOR __CUDACC_VER_MAJOR__
    #define GB_COMPILER_MINOR __CUDACC_VER_MINOR__
    #define GB_COMPILER_SUB __CUDACC_VER_BUILD__
    #define GB_COMPILER_NAME "nvcc"

#elif defined ( __INTEL_CLANG_COMPILER )

    // Intel icx compiler, 2022.0.0 based on clang/llvm 14.0.0
    #define GB_COMPILER_NVCC 0
    #define GB_COMPILER_ICX 1
    #define GB_COMPILER_ICC 0
    #define GB_COMPILER_CLANG 0
    #define GB_COMPILER_GCC 0
    #define GB_COMPILER_MSC 0
    #define GB_COMPILER_XLC 0

    #define GB_COMPILER_MAJOR __INTEL_CLANG_COMPILER
    #define GB_COMPILER_MINOR 0
    #define GB_COMPILER_SUB 0
    #define GB_COMPILER_NAME __VERSION__

#elif defined ( __INTEL_COMPILER )

    // Intel icc compiler: 2021.5.0 uses "gcc 7.5 mode"
    #define GB_COMPILER_NVCC 0
    #define GB_COMPILER_ICX 0
    #define GB_COMPILER_ICC 1
    #define GB_COMPILER_CLANG 0
    #define GB_COMPILER_GCC 0
    #define GB_COMPILER_MSC 0
    #define GB_COMPILER_XLC 0

    #define GB_COMPILER_MAJOR __INTEL_COMPILER
    #define GB_COMPILER_MINOR __INTEL_COMPILER_UPDATE
    #define GB_COMPILER_SUB 0
    #define GB_COMPILER_NAME __VERSION__

#elif defined ( __clang__ )

    // clang
    #define GB_COMPILER_NVCC 0
    #define GB_COMPILER_ICX 0
    #define GB_COMPILER_ICC 0
    #define GB_COMPILER_CLANG 1
    #define GB_COMPILER_GCC 0
    #define GB_COMPILER_MSC 0
    #define GB_COMPILER_XLC 0

    #define GB_COMPILER_MAJOR __clang_major__
    #define GB_COMPILER_MINOR __clang_minor__
    #define GB_COMPILER_SUB __clang_patchlevel__
    #define GB_COMPILER_NAME "clang " __clang_version__

#elif defined ( __xlC__ )

    // xlc
    #define GB_COMPILER_NVCC 0
    #define GB_COMPILER_ICX 0
    #define GB_COMPILER_ICC 0
    #define GB_COMPILER_CLANG 0
    #define GB_COMPILER_GCC 0
    #define GB_COMPILER_MSC 0
    #define GB_COMPILER_XLC 1

    // __xlC__ packs major/minor as (major << 8) + minor
    #define GB_COMPILER_MAJOR ( __xlC__ / 256 )
    #define GB_COMPILER_MINOR ( __xlC__ - 256 * GB_COMPILER_MAJOR)
    #define GB_COMPILER_SUB 0
    #define GB_COMPILER_NAME "IBM xlc " GB_XSTR (__xlC__)

#elif defined ( __GNUC__ )

    // gcc
    #define GB_COMPILER_NVCC 0
    #define GB_COMPILER_ICX 0
    #define GB_COMPILER_ICC 0
    #define GB_COMPILER_CLANG 0
    #define GB_COMPILER_GCC 1
    #define GB_COMPILER_MSC 0
    #define GB_COMPILER_XLC 0

    #define GB_COMPILER_MAJOR __GNUC__
    #define GB_COMPILER_MINOR __GNUC_MINOR__
    #define GB_COMPILER_SUB __GNUC_PATCHLEVEL__
    #define GB_COMPILER_NAME "GNU gcc " GB_XSTR (__GNUC__) "." \
        GB_XSTR (__GNUC_MINOR__) "." GB_XSTR (__GNUC_PATCHLEVEL__)

#elif defined ( _MSC_VER )

    // Microsoft Visual Studio
    #define GB_COMPILER_NVCC 0
    #define GB_COMPILER_ICX 0
    #define GB_COMPILER_ICC 0
    #define GB_COMPILER_CLANG 0
    #define GB_COMPILER_GCC 0
    #define GB_COMPILER_MSC 1
    #define GB_COMPILER_XLC 0

    // _MSC_VER packs major/minor as major*100 + minor
    #define GB_COMPILER_MAJOR ( _MSC_VER / 100 )
    #define GB_COMPILER_MINOR ( _MSC_VER - 100 * GB_COMPILER_MAJOR)
    #define GB_COMPILER_SUB 0
    #define GB_COMPILER_NAME "Microsoft Visual Studio " GB_XSTR (_MSC_VER)

#else

    // other compiler
    #define GB_COMPILER_NVCC 0
    #define GB_COMPILER_ICX 0
    #define GB_COMPILER_ICC 0
    #define GB_COMPILER_CLANG 0
    #define GB_COMPILER_GCC 0
    #define GB_COMPILER_MSC 0
    #define GB_COMPILER_XLC 0

    #define GB_COMPILER_MAJOR 0
    #define GB_COMPILER_MINOR 0
    #define GB_COMPILER_SUB 0
    #define GB_COMPILER_NAME "other C compiler"

#endif

//------------------------------------------------------------------------------
// compiler variations
//------------------------------------------------------------------------------

// Determine the restrict keyword, and whether or not variable-length arrays
// are supported.

#if GB_COMPILER_NVCC

    // NVIDIA nvcc compiler for host or device code
    #define GB_HAS_VLA 1
    #define restrict __restrict__

#elif GB_COMPILER_MSC

    // Microsoft Visual Studio does not have the restrict keyword, but it does
    // support __restrict, which is equivalent.  Variable-length arrays are
    // not supported.  OpenMP tasks are not available.

    #define GB_HAS_VLA 0

    #if defined ( __cplusplus )
        // C++ does not have the restrict keyword
        #define restrict
    #else
        // C uses __restrict
        #define restrict __restrict
    #endif

    // Microsoft-specific include file
    #include <malloc.h>

#elif defined ( __cplusplus )

    #define GB_HAS_VLA 1
    // C++ does not have the restrict keyword
    #define restrict

#elif GxB_STDC_VERSION >= 199901L

    // ANSI C99 and later have the restrict keyword and variable-length arrays.
    // (An undefined GxB_STDC_VERSION evaluates to 0 in this #elif, so it
    // safely falls through to the pre-C99 branch below.)
    #define GB_HAS_VLA 1

#else

    // ANSI C95 and earlier have neither
    #define GB_HAS_VLA 0
    #define restrict

#endif

//------------------------------------------------------------------------------
// PGI_COMPILER_BUG
//------------------------------------------------------------------------------

// If GraphBLAS is compiled with -DPGI_COMPILER_BUG, then a workaround is
// enabled for a bug in the PGI compiler.  The compiler does not correctly
// handle automatic arrays of variable size.

#ifdef PGI_COMPILER_BUG

    // override the ANSI C compiler to turn off variable-length arrays
    #undef GB_HAS_VLA
    #define GB_HAS_VLA 0

#endif

//------------------------------------------------------------------------------
// OpenMP pragmas and tasks
//------------------------------------------------------------------------------

// GB_PRAGMA(x) becomes "#pragma x", but the way to do this depends on the
// compiler:

#if GB_COMPILER_MSC

    // MS Visual Studio is not ANSI C11 compliant, and uses __pragma:
    #define GB_PRAGMA(x) __pragma (x)
    // no #pragma omp simd is available in MS Visual Studio
    #define GB_PRAGMA_SIMD
    #define GB_PRAGMA_SIMD_REDUCTION(op,s)

#else

    // ANSI C11 compilers use _Pragma:
    #define GB_PRAGMA(x) _Pragma (#x)
    // create two kinds of SIMD pragmas:
    // GB_PRAGMA_SIMD becomes "#pragma omp simd"
    // GB_PRAGMA_SIMD_REDUCTION (+,cij) becomes
    // "#pragma omp simd reduction(+:cij)"
    #define GB_PRAGMA_SIMD GB_PRAGMA (omp simd)
    #define GB_PRAGMA_SIMD_REDUCTION(op,s) GB_PRAGMA (omp simd reduction(op:s))

#endif

// NOTE: "#pragma ivdep" is vendor-specific; compilers that do not recognize
// it ignore the pragma (possibly with a warning), which is harmless.
#define GB_PRAGMA_IVDEP GB_PRAGMA(ivdep)

//------------------------------------------------------------------------------
// variable-length arrays
//------------------------------------------------------------------------------

// If variable-length arrays are not supported, user-defined types are limited
// in size to 128 bytes or less.  Many of the type-generic routines allocate
// workspace for a single scalar of variable size, using a statement:
//
//      GB_void aij [xsize] ;
//
// To support non-variable-length arrays in ANSI C95 or earlier, this is used:
//
//      GB_void aij [GB_VLA(xsize)] ;
//
// GB_VLA(xsize) is either defined as xsize (for ANSI C99 or later), or a fixed
// size of 128, in which case user-defined types
// are limited to a max of 128 bytes.

#if ( GB_HAS_VLA )

    // variable-length arrays are allowed
    #define GB_VLA(s) s

#else

    // variable-length arrays are not allowed
    #define GB_VLA_MAXSIZE 128
    #define GB_VLA(s) GB_VLA_MAXSIZE

#endif

//------------------------------------------------------------------------------
// AVX2 and AVX512F support for the x86_64 architecture
//------------------------------------------------------------------------------

// gcc 7.5.0 cannot compile code with __attribute__ ((target ("avx512f"))), or
// avx2 (it triggers a bug in the compiler), but those targets are fine with
// gcc 9.3.0 or later.  It might be OK on gcc 8.x but I haven't tested this.

#if GBX86

    #if GB_COMPILER_GCC
        #if __GNUC__ >= 9
            // enable avx512f on gcc 9.x and later
            #define GB_COMPILER_SUPPORTS_AVX512F 1
            #define GB_COMPILER_SUPPORTS_AVX2 1
        #else
            // disable avx2 and avx512f on gcc 8.x and earlier
            #define GB_COMPILER_SUPPORTS_AVX512F 0
            #define GB_COMPILER_SUPPORTS_AVX2 0
        #endif
    #elif GB_COMPILER_ICX || GB_COMPILER_ICC || GB_COMPILER_CLANG
        // all these compilers can handle AVX512F and AVX2 on x86
        #define GB_COMPILER_SUPPORTS_AVX512F 1
        #define GB_COMPILER_SUPPORTS_AVX2 1
    #else
        // unsure if xlc can handle AVX, but it is not likely to be used on
        // the x86 anyway.  cpu_features is disabled for MS Visual Studio.
        #define GB_COMPILER_SUPPORTS_AVX512F 0
        #define GB_COMPILER_SUPPORTS_AVX2 0
    #endif

#else

    // non-X86_64 architecture
    #define GB_COMPILER_SUPPORTS_AVX512F 0
    #define GB_COMPILER_SUPPORTS_AVX2 0

#endif

// prefix for function with target avx512f
#if GB_COMPILER_SUPPORTS_AVX512F
    #if (defined (_WIN64) || defined (_WIN32)) && \
        (GB_COMPILER_ICC || GB_COMPILER_ICX)
        // the Intel compilers on Windows support this feature:
        #define GB_TARGET_AVX512F __declspec (target ("avx512f"))
    #else
        #define GB_TARGET_AVX512F __attribute__ ((target ("avx512f")))
    #endif
#else
    #define GB_TARGET_AVX512F
#endif

// prefix for function with target avx2
#if GB_COMPILER_SUPPORTS_AVX2
    #if (defined (_WIN64) || defined (_WIN32)) && \
        (GB_COMPILER_ICC || GB_COMPILER_ICX)
        // the Intel compilers on Windows support this feature:
        #define GB_TARGET_AVX2 __declspec (target ("avx2"))
    #else
        #define GB_TARGET_AVX2 __attribute__ ((target ("avx2")))
    #endif
#else
    #define GB_TARGET_AVX2
#endif

#endif
estimate_dt_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela, Ruben Zorrilla // // #ifndef KRATOS_ESTIMATE_DT_UTILITIES_H #define KRATOS_ESTIMATE_DT_UTILITIES_H // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" #include "includes/element.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" #include "includes/serializer.h" #include "utilities/openmp_utils.h" #include "utilities/geometry_utilities.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Classes ///@{ /// Estimate the time step in a fluid problem to obtain a given Courant number. template< unsigned int TDim > class EstimateDtUtility { public: ///@name Life Cycle ///@{ /// Constructor /** * @param ModelPart The model part containing the problem mesh * @param CFL The user-defined Courant-Friedrichs-Lewy number * @param DtMin user-defined minimum time increment allowed * @param DtMax user-defined maximum time increment allowed */ EstimateDtUtility(ModelPart &ModelPart, const double CFL, const double DtMin, const double DtMax): mrModelPart(ModelPart) { mCFL = CFL; mDtMin = DtMin; mDtMax = DtMax; } /// Constructor with Kratos parameters /** * @param ModelPart The model part containing the problem mesh * @param rParameters Kratos parameters containing the CFL number and max time step */ EstimateDtUtility(ModelPart& ModelPart, Parameters& rParameters): mrModelPart(ModelPart) { Parameters defaultParameters(R"({ "automatic_time_step" : true, "CFL_number" : 1.0, "minimum_delta_time" : 1e-4, "maximum_delta_time" : 0.1 })"); rParameters.ValidateAndAssignDefaults(defaultParameters); mCFL = rParameters["CFL_number"].GetDouble(); mDtMin = rParameters["minimum_delta_time"].GetDouble(); mDtMax 
= rParameters["maximum_delta_time"].GetDouble(); } /// Destructor ~EstimateDtUtility() {} ///@} ///@name Operations ///@{ /// Set the CFL value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetCFL(const double CFL) { mCFL = CFL; } /// Set the maximum time step allowed value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetDtMin(const double DtMin) { mDtMin = DtMin; } /// Set the maximum time step allowed value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetDtMax(const double DtMax) { mDtMax = DtMax; } /// Calculate the maximum time step that satisfies the Courant-Friedrichs-Lewy (CFL) condition. /** * @return A time step value that satisfies the CFL condition for the current mesh and velocity field */ double EstimateDt() { KRATOS_TRY; unsigned int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfElements(),NumThreads,ElementPartition); double CurrentDt = mrModelPart.GetProcessInfo().GetValue(DELTA_TIME); std::vector<double> MaxCFL(NumThreads,0.0); #pragma omp parallel shared(MaxCFL) { int k = OpenMPUtils::ThisThread(); ModelPart::ElementIterator ElemBegin = mrModelPart.ElementsBegin() + ElementPartition[k]; ModelPart::ElementIterator ElemEnd = mrModelPart.ElementsBegin() + ElementPartition[k+1]; GeometryDataContainer GeometryInfo; double MaxLocalCFL = 0.0; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { double ElementCFL = CalculateElementCFL(*itElem,GeometryInfo,CurrentDt); if (ElementCFL > MaxLocalCFL) { MaxLocalCFL = ElementCFL; } } MaxCFL[k] = MaxLocalCFL; } // Reduce to maximum the thread results // Note that MSVC14 does not support max reductions, which are part of OpenMP 3.1 double CurrentCFL = MaxCFL[0]; for (unsigned int k = 1; k < NumThreads; k++) { if (CurrentCFL > 
MaxCFL[k]) CurrentCFL = MaxCFL[k]; } double NewDt = 0.0; // Avoid division by 0 when the maximum CFL number is close to 0 (e.g. problem initialization) if (CurrentCFL < 1e-10) { KRATOS_INFO("EstimateDtUtility") << "Setting minimum delta time " << mDtMin << " as current time step." << std::endl; NewDt = mDtMin; } else { // Compute new Dt NewDt = mCFL * CurrentDt / CurrentCFL; // Limit max and min Dt if (NewDt > mDtMax) { NewDt = mDtMax; } else if (NewDt < mDtMin) { NewDt = mDtMin; } } // Perform MPI sync if needed mrModelPart.GetCommunicator().MinAll(NewDt); return NewDt; KRATOS_CATCH("") } /// Calculate each element's CFL for the current time step. /** * The elemental CFL is stored in the CFL_NUMBER elemental variable. * To view it in the post-process file, remember to print CFL_NUMBER as a Gauss Point result. */ void CalculateLocalCFL() { KRATOS_TRY; unsigned int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfElements(),NumThreads,ElementPartition); const double CurrentDt = mrModelPart.GetProcessInfo().GetValue(DELTA_TIME); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementIterator ElemBegin = mrModelPart.ElementsBegin() + ElementPartition[k]; ModelPart::ElementIterator ElemEnd = mrModelPart.ElementsBegin() + ElementPartition[k+1]; GeometryDataContainer GeometryInfo; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { double ElementCFL = CalculateElementCFL(*itElem,GeometryInfo,CurrentDt); itElem->SetValue(CFL_NUMBER,ElementCFL); } } KRATOS_CATCH("") } ///@} // Operators private: ///@name Auxiliary Data types ///@{ struct GeometryDataContainer { double Area; array_1d<double, TDim+1> N; BoundedMatrix<double, TDim+1, TDim> DN_DX; }; ///@} ///@name Member Variables ///@{ double mCFL; // User-defined CFL number double mDtMax; // User-defined maximum time increment allowed double mDtMin; // User-defined minimum time 
increment allowed ModelPart &mrModelPart; // The problem's model part ///@} // Member variables ///@name Private Operations ///@{ double CalculateElementCFL(Element &rElement, GeometryDataContainer& rGeometryInfo, double Dt) { double Proj = 0.0; // Get the element's geometric parameters Geometry< Node<3> >& rGeom = rElement.GetGeometry(); GeometryUtils::CalculateGeometryData(rGeom, rGeometryInfo.DN_DX, rGeometryInfo.N, rGeometryInfo.Area); // Elemental Velocity array_1d<double,3> ElementVel = rGeometryInfo.N[0]*rGeom[0].FastGetSolutionStepValue(VELOCITY); for (unsigned int i = 1; i < TDim+1; ++i) ElementVel += rGeometryInfo.N[i]*rGeom[i].FastGetSolutionStepValue(VELOCITY); // Calculate u/h as the maximum projection of the velocity along element heights for (unsigned int i = 0; i < TDim+1; ++i) { for (unsigned int d = 0; d < TDim; ++d) Proj += ElementVel[d]*rGeometryInfo.DN_DX(i,d); Proj = fabs(Proj); } return Proj*Dt; } ///@} // Private Operations }; ///@} // Kratos classes ///@} } // namespace Kratos. #endif /* KRATOS_ESTIMATE_DT_UTILITIES_H */
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // use the id(...) matcher around the match expressions that match the nodes // you want to access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(id("child", recordDecl()))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the id(...) calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. 
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T> const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::ast_type_traits::DynTypedNode.
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  /// Only the tree builder may construct instances; users receive them
  /// through MatchFinder callbacks.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  internal::BoundNodesMap MyBoundNodes;
};

/// If the provided matcher matches a node, binds the node to \c ID.
///
/// FIXME: Do we want to support this now that we have bind()?
template <typename T>
internal::Matcher<T> id(StringRef ID,
                        const internal::BindableMatcher<T> &InnerMatcher) {
  return InnerMatcher.bind(ID);
}

/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}

/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
///   "int* p" and "void f()" in
///   int* p;
///   void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }

/// Matches the top declaration context.
///
/// Given
/// \code
///   int X;
///   namespace NS {
///   int Y;
///   } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
///   matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
    translationUnitDecl;

/// Matches typedef declarations.
///
/// Given
/// \code
///   typedef int X;
///   using Y = int;
/// \endcode
/// typedefDecl()
///   matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
    typedefDecl;

/// Matches typedef name declarations.
///
/// Given
/// \code
///   typedef int X;
///   using Y = int;
/// \endcode
/// typedefNameDecl()
///   matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
    typedefNameDecl;

/// Matches type alias declarations.
///
/// Given
/// \code
///   typedef int X;
///   using Y = int;
/// \endcode
/// typeAliasDecl()
///   matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
    typeAliasDecl;

/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
///   template <typename T>
///   using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
    typeAliasTemplateDecl;

/// Matches AST nodes that were expanded within the main-file.
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. 
/// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. 
/// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. 
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;

/// Matches non-type template parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
///   matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   NonTypeTemplateParmDecl>
    nonTypeTemplateParmDecl;

/// Matches template type parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
///   matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
    templateTypeParmDecl;

/// Matches public C++ declarations.
///
/// Given
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
/// fieldDecl(isPublic())
///   matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  return Node.getAccess() == AS_public;
}

/// Matches protected C++ declarations.
///
/// Given
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
/// fieldDecl(isProtected())
///   matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  return Node.getAccess() == AS_protected;
}

/// Matches private C++ declarations.
///
/// Given
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
/// fieldDecl(isPrivate())
///   matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  return Node.getAccess() == AS_private;
}

/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
///   class C {
///     int a : 2;
///     int b;
///   };
/// \endcode
/// fieldDecl(isBitField())
///   matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  return Node.isBitField();
}

/// Matches non-static data members that are bit-fields of the specified
/// bit width.
/// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). 
AST_MATCHER(Decl, isImplicit) {
  return Node.isImplicit();
}

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}

/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   class C {};
///   C a = C();
///   C b;
///   C c = b;
/// \endcode
/// The matchers
/// \code
///    varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
///    varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. 
/// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. 
/// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. /// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. 
/// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. /// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. 
/// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that referes to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. 
/// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. 
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. /// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. 
/// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. /// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. 
/// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. 
/// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. 
/// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. /// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. 
/// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. 
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. 
/// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches array subscript expressions. 
/// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. 
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. 
/// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. 
/// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). /// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. 
/// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. /// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. 
/// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. /// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. 
/// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::Matcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  // Single-name convenience wrapper over HasNameMatcher (which also backs
  // hasAnyName).
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
///  is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>,
                                        StringRef, internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'.  Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prepend "::" so anchored patterns like "::X" can match the outermost
  // (global-namespace) qualifier as well.
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  llvm::Regex RE(RegExp);
  return RE.match(FullNameString);
}

/// Matches overloaded operator names.
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name); } /// Matches C++ classes that are directly or indirectly derived from /// a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_MATCHER_P(CXXRecordDecl, isDerivedFrom, internal::Matcher<NamedDecl>, Base) { return Finder->classIsDerivedFrom(&Node, Base, Builder); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). 
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isDerivedFrom, std::string, BaseName, 1) {
  assert(!BaseName.empty());
  return isDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}

/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom,
                       internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": either the node itself matches Base, or it derives
  // from something matching Base.
  return Matcher<CXXRecordDecl>(anyOf(Base, isDerivedFrom(Base)))
      .matches(Node, Finder, Builder);
}

/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom, std::string,
                       BaseName, 1) {
  assert(!BaseName.empty());
  return isSameOrDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method that matches; bindings come from that method.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
/// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. 
/// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. 
/// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) { assert(!RegExp.empty()); std::string SelectorString = Node.getSelector().getAsString(); llvm::Regex RE(RegExp); return RE.match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<ValueDecl> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  // Canonicalization strips typedefs/sugar before matching.
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Deliberately does NOT call IgnoreParenImpCasts(); compare with `on`.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accepts the object type either directly or behind one level of pointer,
  // so the same matcher works for `obj.m()` and `ptr->m()`.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found via a UsingShadowDecl (i.e. through a using
  // declaration) can match; direct qualified references do not.
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Bounds-check first; decl iterators are forward-only, so advance by N.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler has no exception declaration at all.
  return Node.getExceptionDecl() == nullptr;
}

/// Matches a constructor initializer.
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. /// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. 
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Pair the matched argument with the callee's parameter at the same
      // (operator-adjusted) position; a match on either form of callee
      // (constructor or plain function) records the binding.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
/// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. 
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. 
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  return Node.isDeleted();
}

/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  return Node.isDefaulted();
}

/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
///   if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Condition = Node.getCond();
  return (Condition != nullptr &&
          InnerMatcher.matches(*Condition, Finder, Builder));
}

/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Then = Node.getThen();
  return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}

/// Matches the else-statement of an if statement.
/// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = ast_type_traits::DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. 
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt *const DeclarationStatement =
      Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher dispatches per node type to the right body accessor.
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder, Builder);
}

/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0)))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  return Node.size() == N;
}

/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher, ValueT>(Value);
}

// Overloads below disambiguate literal arguments (bool / unsigned / double)
// so that e.g. equals(false) and equals(0) both resolve sensibly.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       FloatingLiteral, IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  return Name == Node.getOpcodeStr(Node.getOpcode());
}

/// Matches all kinds of assignment operators.
/// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; }) /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Uses the type as written in the cast, not the semantic result type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}

/// Matches RecordDecl object that are spelled with "struct."
///
/// Example matches S, but not C or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isStruct) {
  return Node.isStruct();
}

/// Matches RecordDecl object that are spelled with "union."
///
/// Example matches U, but not C or S.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isUnion) {
  return Node.isUnion();
}

/// Matches RecordDecl object that are spelled with "class."
///
/// Example matches C, but not S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isClass) {
  return Node.isClass();
}

/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getTrueExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getFalseExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  return Node.isThisDeclarationADefinition();
}

/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  return Node.isVariadic();
}

/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
/// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches if the given method declaration is virtual. 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. 
/// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. 
/// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. 
/// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). 
AST_MATCHER_P(MemberExpr, member, internal::Matcher<ValueDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder); } /// Matches a member expression where the object expression is matched by a /// given matcher. Implicit object expressions are included; that is, it matches /// use of implicit `this`. /// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( // cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. /// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. 
/// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. 
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. 
/// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. 
/// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. 
/// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. 
/// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. 
/// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. 
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;

/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
///   int *a;
///   int const *b;
///   float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
///   matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
///   Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));

/// Matches typedef types.
///
/// Given
/// \code
///   typedef int X;
/// \endcode
/// typedefType()
///   matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;

/// Matches enum types.
///
/// Given
/// \code
///   enum C { Green };
///   enum class S { Red };
///
///   C c;
///   S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;

/// Matches template specialization types.
///
/// Given
/// \code
///   template <typename T>
///   class C { };
///
///   template class C<int>;  // A
///   C<char> var;            // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
    templateSpecializationType;

/// Matches type nodes representing unary type transformations.
///
/// Given:
/// \code
///   typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
///   matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;

/// Matches record types (e.g. structs, classes).
/// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. 
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whos decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor and conversion declarations that are marked with /// the explicit keyword. /// /// Given /// \code /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// }; /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2, but not #1. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXConstructorDecl, CXXConversionDecl)) { // FIXME : it's not clear whether this should match a dependent // explicit(....). 
this matcher should also be able to match // CXXDeductionGuideDecl with explicit specifier. return Node.isExplicit(); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 
4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
  return anyOf(
      gnuNullExpr(), cxxNullPtrLiteralExpr(),
      integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}

/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
///   F& operator=(const F& o) {
///     std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///     return *this;
///   }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0' from the lambda
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk up the parent chain breadth/depth-mixed via an explicit stack until
  // an enclosing FunctionDecl (or a lambda's call operator) is found.
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
                                                            Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda body belongs to the lambda's call operator, not to the
      // lexically enclosing function.
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Not a function-like node: keep climbing through its parents.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(intgerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor. /// /// In C++17 copy elidable constructors are no longer being /// generated in the AST as it is not permitted by the standard. 
They are /// however part of the AST in C++14 and earlier. Therefore, to write a matcher /// that works in all language modes, the matcher has to skip elidable /// constructor AST nodes if they appear in the AST. This matcher can be used to /// skip those elidable constructors. /// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(any( /// ignoringElidableConstructorCall(callExpr()), /// exprWithCleanups(ignoringElidableConstructorCall(callExpr()))))`` /// matches ``H D = G()`` AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(&Node)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->GetTemporaryExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. 
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the Stmt AST node that is marked as being the structured-block /// of an OpenMP executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// \endcode /// /// ``stmt(isOMPStructuredBlock()))`` matches ``{}``. AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``. 
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == OMPC_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == OMPC_DEFAULT_shared; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
ast-dump-openmp-target-update.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(int x) { #pragma omp target update to(x) } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-update.c:3:1, line:5:1> line:3:6 test 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:5:1> // CHECK-NEXT: `-OMPTargetUpdateDirective {{.*}} <line:4:9, col:32> openmp_standalone_directive // CHECK-NEXT: |-OMPToClause {{.*}} <col:27, col:31> // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <col:9> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CompoundStmt {{.*}} <col:9> // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-update.c:4:9) *const restrict'
core.c
/* Main solver routines for heat equation solver */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "heat.h" /* Update the temperature values using five-point stencil */ void evolve(field *curr, field *prev, double a, double dt) { int i, j; double dx2, dy2; /* Determine the temperature field at next time step * As we have fixed boundary conditions, the outermost gridpoints * are not updated. */ dx2 = prev->dx * prev->dx; dy2 = prev->dy * prev->dy; #pragma omp for private(i, j) for (i = 1; i < curr->nx + 1; i++) { for (j = 1; j < curr->ny + 1; j++) { curr->data[i][j] = prev->data[i][j] + a * dt * ((prev->data[i + 1][j] - 2.0 * prev->data[i][j] + prev->data[i - 1][j]) / dx2 + (prev->data[i][j + 1] - 2.0 * prev->data[i][j] + prev->data[i][j - 1]) / dy2); } } }
coordinate_common.h
/*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #pragma once #include <algorithm> #include <string> #include <utility> #include <vector> #include <limits> #include "../common/random.h" namespace xgboost { namespace linear { /** * \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the * number of training instances. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * \param w The weight. * \param reg_alpha Unnormalised L1 penalty. * \param reg_lambda Unnormalised L2 penalty. * * \return The weight update. */ inline double CoordinateDelta(double sum_grad, double sum_hess, double w, double reg_alpha, double reg_lambda) { if (sum_hess < 1e-5f) return 0.0f; const double sum_grad_l2 = sum_grad + reg_lambda * w; const double sum_hess_l2 = sum_hess + reg_lambda; const double tmp = w - sum_grad_l2 / sum_hess_l2; if (tmp >= 0) { return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w); } else { return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w); } } /** * \brief Calculate update to bias. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * * \return The weight update. */ inline double CoordinateDeltaBias(double sum_grad, double sum_hess) { return -sum_grad / sum_hess; } /** * \brief Get the gradient with respect to a single feature. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. 
*/ inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to a single feature. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. */ inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to the bias. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for the bias. 
*/ inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint i = 0; i < ndata; ++i) { auto &p = gpair[i * num_group + group_idx]; if (p.GetHess() >= 0.0f) { sum_grad += p.GetGrad(); sum_hess += p.GetHess(); } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Updates the gradient vector with respect to a change in weight. * * \param fidx The feature index. * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dw The change in weight. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. */ inline void UpdateResidualParallel(int fidx, int group_idx, int num_group, float dw, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dw == 0.0f) return; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; // update grad value const auto num_row = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < num_row; ++j) { GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0); } } } /** * \brief Updates the gradient vector based on a change in the bias. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dbias The change in bias. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. 
*/ inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dbias == 0.0f) return; const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { GradientPair &g = (*in_gpair)[i * num_group + group_idx]; if (g.GetHess() < 0.0f) continue; g += GradientPair(g.GetHess() * dbias, 0); } } /** * \brief Abstract class for stateful feature selection or ordering * in coordinate descent algorithms. */ class FeatureSelector { public: /*! \brief factory method */ static FeatureSelector *Create(int choice); /*! \brief virtual destructor */ virtual ~FeatureSelector() = default; /** * \brief Setting up the selector state prior to looping through features. * * \param model The model. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * \param param A parameter with algorithm-dependent use. */ virtual void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) {} /** * \brief Select next coordinate to update. * * \param iteration The iteration in a loop through features * \param model The model. * \param group_idx Zero-based index of the group. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * * \return The index of the selected feature. -1 indicates none selected. */ virtual int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) = 0; }; /** * \brief Deterministic selection by cycling through features one at a time. 
*/ class CyclicFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return iteration % model.param.num_feature; } }; /** * \brief Similar to Cyclic but with random feature shuffling prior to each update. * \note Its randomness is controllable by setting a random seed. */ class ShuffleFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { if (feat_index_.size() == 0) { feat_index_.resize(model.param.num_feature); std::iota(feat_index_.begin(), feat_index_.end(), 0); } std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom()); } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return feat_index_[iteration % model.param.num_feature]; } protected: std::vector<bst_uint> feat_index_; }; /** * \brief A random (with replacement) coordinate selector. * \note Its randomness is controllable by setting a random seed. */ class RandomFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return common::GlobalRandom()() % model.param.num_feature; } }; /** * \brief Select coordinate with the greatest gradient magnitude. * \note It has O(num_feature^2) complexity. It is fully deterministic. * * \note It allows restricting the selection to top_k features per group with * the largest magnitude of univariate weight change, by passing the top_k value * through the `param` argument of Setup(). That would reduce the complexity to * O(num_feature*top_k). 
 */
class GreedyFeatureSelector : public FeatureSelector {
 public:
  // Records top_k (param <= 0 means "unlimited") and resets the per-group
  // selection counters; the gradient-sum buffer is allocated once.
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair,
             DMatrix *p_fmat, float alpha, float lambda, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    const bst_uint ngroup = model.param.num_output_group;
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    if (counter_.size() == 0) {
      counter_.resize(ngroup);
      gpair_sums_.resize(model.param.num_feature * ngroup);
    }
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      counter_[gid] = 0u;
    }
  }

  // Recomputes the univariate gradient sums over all columns and returns the
  // feature whose coordinate-descent weight change has the largest magnitude,
  // or -1 once top_k / num_feature selections were made for this group.
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-K or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
    const int ngroup = model.param.num_output_group;
    const bst_omp_uint nfeat = model.param.num_feature;
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetColumnBatches()) {
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        auto &sums = gpair_sums_[group_idx * nfeat + i];
        for (bst_uint j = 0u; j < ndata; ++j) {
          const bst_float v = col[j].fvalue;
          auto &p = gpair[col[j].index * ngroup + group_idx];
          // negative hessian marks an excluded/invalid row
          if (p.GetHess() < 0.f) continue;
          sums.first += p.GetGrad() * v;
          sums.second += p.GetHess() * v * v;
        }
      }
    }
    // Find a feature with the largest magnitude of weight change
    int best_fidx = 0;
    double best_weight_update = 0.0f;
    for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
      auto &s = gpair_sums_[group_idx * nfeat + fidx];
      float dw = std::abs(static_cast<bst_float>(
          CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
      if (dw > best_weight_update) {
        best_weight_update = dw;
        best_fidx = fidx;
      }
    }
    return best_fidx;
  }

 protected:
  bst_uint top_k_;                                   // per-group selection budget
  std::vector<bst_uint> counter_;                    // selections made per group
  std::vector<std::pair<double, double>> gpair_sums_;  // (sum g*v, sum h*v^2) per feature/group
};

/**
 * \brief Thrifty, approximately-greedy feature selector.
 *
 * \note Prior to cyclic updates, reorders features in descending magnitude of
 * their univariate weight changes. This operation is multithreaded and is a
 * linear complexity approximation of the quadratic greedy selection.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup().
 */
class ThriftyFeatureSelector : public FeatureSelector {
 public:
  // One-shot ranking pass: computes univariate gradient sums, then sorts the
  // feature indices of every group by descending |weight change|.
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair,
             DMatrix *p_fmat, float alpha, float lambda, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    const bst_uint ngroup = model.param.num_output_group;
    const bst_omp_uint nfeat = model.param.num_feature;
    if (deltaw_.size() == 0) {
      deltaw_.resize(nfeat * ngroup);
      sorted_idx_.resize(nfeat * ngroup);
      counter_.resize(ngroup);
      gpair_sums_.resize(nfeat * ngroup);
    }
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetColumnBatches()) {
      // column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        for (bst_uint gid = 0u; gid < ngroup; ++gid) {
          auto &sums = gpair_sums_[gid * nfeat + i];
          for (bst_uint j = 0u; j < ndata; ++j) {
            const bst_float v = col[j].fvalue;
            auto &p = gpair[col[j].index * ngroup + gid];
            if (p.GetHess() < 0.f) continue;
            sums.first += p.GetGrad() * v;
            sums.second += p.GetHess() * v * v;
          }
        }
      }
    }
    // rank by descending weight magnitude within the groups
    std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
    std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
    bst_float *pdeltaw = &deltaw_[0];
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      // Calculate univariate weight changes
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        auto ii = gid * nfeat + i;
        auto &s = gpair_sums_[ii];
        deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
            s.first, s.second, model[i][gid], alpha, lambda));
      }
      // sort in descending order of deltaw abs values
      auto start = sorted_idx_.begin() + gid * nfeat;
      std::sort(start, start + nfeat,
                [pdeltaw](size_t i, size_t j) {
                  return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
                });
      counter_[gid] = 0u;
    }
  }

  // Walks the precomputed per-group ranking; -1 ends the group's round.
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-N or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
    // note that sorted_idx stores the "long" indices
    const size_t grp_offset = group_idx * model.param.num_feature;
    return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
  }

 protected:
  bst_uint top_k_;                    // per-group selection budget
  std::vector<bst_float> deltaw_;     // univariate weight change per feature/group
  std::vector<size_t> sorted_idx_;    // "long" (group*nfeat + feat) indices, ranked
  std::vector<bst_uint> counter_;     // selections made per group this round
  std::vector<std::pair<double, double>> gpair_sums_;
};

/**
 * \brief A set of available FeatureSelector's
 */
enum FeatureSelectorEnum {
  kCyclic = 0,
  kShuffle,
  kThrifty,
  kGreedy,
  kRandom
};

// Factory: maps a FeatureSelectorEnum value to a heap-allocated selector.
// Caller owns the returned pointer.  Unknown values are a fatal error.
inline FeatureSelector *FeatureSelector::Create(int choice) {
  switch (choice) {
    case kCyclic:
      return new CyclicFeatureSelector();
    case kShuffle:
      return new ShuffleFeatureSelector();
    case kThrifty:
      return new ThriftyFeatureSelector();
    case kGreedy:
      return new GreedyFeatureSelector();
    case kRandom:
      return new RandomFeatureSelector();
    default:
      LOG(FATAL) << "unknown coordinate selector: " << choice;
  }
  return nullptr;
}

}  // namespace linear
}  // namespace xgboost
opi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b: get the number of threads to run with from agrv and // add OpenMP API code to set number of threads here int Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number #pragma omp parallel { int rank = omp_get_thread_num(); int size = omp_get_num_threads(); drandData[rank] = (struct) rank; long int seed = rank; srand48_r(seed, drandData+rank); } long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; #pragma omp parallel for reduction(+:Ncircle) for (long long int n=0; n<Ntrials; n++) { double rand1; double rand2; int rank = omp_get_thread_num(); //gererate two random numbers (use the thread id to offset drandData) drand48_r(drandData+rank, &rand1); drand48_r(drandData+rank, &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if its in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0) { double pi = 4.0*Ncircle/ (double) (n); printf("Our estimate of pi is %g \n", pi); } } double pi = 4.0*Ncircle/ (double) (Ntotal); printf("Our final estimate of pi is %g \n", pi); free(drandData); return 0; }
GB_binop__lt_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function below is a thin specialized shell around a
// shared algorithm template (#include'd inside the function body); the macros
// defined here instantiate the template for the LT operator on uint64_t.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lt_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__lt_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__lt_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__lt_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lt_uint64)
// A*D function (colscale):         GB (_AxD__lt_uint64)
// D*A function (rowscale):         GB (_DxB__lt_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__lt_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__lt_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lt_uint64)
// C=scalar+B                       GB (_bind1st__lt_uint64)
// C=scalar+B'                      GB (_bind1st_tran__lt_uint64)
// C=A+scalar                       GB (_bind2nd__lt_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__lt_uint64)

// C type:   bool
// A type:   uint64_t
// A pattern? 0
// B type:   uint64_t
// B pattern? 0

// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_UINT64 || GxB_NO_LT_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// NOTE(review): compiled out -- LT is not a monoid-style accumulator, so the
// dense fused-accumulate kernel does not exist for this operator.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NOTE(review): body compiled out (#if 0); only the stub return remains
    // for this non-accumulator operator.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lt_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NOTE(review): body compiled out (#if 0), as above.
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lt_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion supplies fill scalars for entries present in only one input
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lt_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lt_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lt_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (GBB is always true when Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lt_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x < aij) ;                       \
}

GrB_Info GB (_bind1st_tran__lt_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij < y) ;                       \
}

GrB_Info GB (_bind2nd_tran__lt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
TemporalRowConvolution.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/TemporalRowConvolution.c"
#else

// Validates tensor shapes/contiguity for row convolution.  Layout is always
// (possibly batch) x features x sequence once featFirst normalization is done.
// gradOutput may be NULL (forward-only check).
static inline void THNN_(TemporalRowConvolution_shapeCheck)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *weight,
	THTensor *bias,
	int kW,
	int dW,
	int padW) {

	THArgCheck(kW > 0, 5,
	           "kernel size should be greater than zero, but got kW: %d", kW);
	THArgCheck(dW > 0, 6,
	           "stride should be greater than zero, but got dW: %d", dW);
	THNN_ARGCHECK(weight->nDimension == 3, 3, weight,
	              "3D weight tensor expected, but got: %s");
	THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
	THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");

	if (bias != NULL) {
		THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
	}

	// we're always looking at (possibly batch) x feats x seq
	int ndim = input->nDimension;
	int dimF = 0;  // feature dimension
	int dimS = 1;  // sequence dimension

	if (ndim == 3) {
		++dimS;  // batch mode shifts both dims right by one
		++dimF;
	}

	THNN_ARGCHECK(ndim == 2 || ndim == 3, 1, input,
	              "2D or 3D (batch mode) input tensor expected, but got :%s");

	int64_t inputFrameSize = weight->size[0];
	int64_t nInputFrame = input->size[dimS];
	int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

	if (nOutputFrame < 1) {
		THError("Given input size: (%d x %d). "
		        "Calculated output size: (%d x %d). Output size is too small",
		        inputFrameSize, nInputFrame, inputFrameSize, nOutputFrame);
	}

	THNN_CHECK_DIM_SIZE(input, ndim, dimF, inputFrameSize);

	if (gradOutput != NULL) {
		THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimF, inputFrameSize);
		THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimS, nOutputFrame);
	}
}

// Scatter-accumulate the unfolded buffer back into `input` (inverse of
// unfolded_copy_row); used for the gradient w.r.t. the input.
// NOTE(review): overlapping windows (dW < kW) write to the same destination
// slots, which is presumably why the omp pragma below is commented out.
static void THNN_(unfolded_acc_row)(
	THTensor *finput,
	THTensor *input,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	int64_t c;
	real *input_data = THTensor_(data)(input);
	real *finput_data = THTensor_(data)(finput);

// #pragma omp parallel for private(c)
	for (c = 0; c < inputFrameSize; c++) {
		int64_t kw, x;
		int64_t ix = 0;

		for (kw = 0; kw < kW; kw++) {
			real *src = finput_data
			            + c * (kW * nOutputFrame)
			            + kw * (nOutputFrame);
			real *dst = input_data + c * (nInputFrame);

			ix = (size_t)(kw);
			if (dW == 1) {
				// stride 1: the whole window column accumulates contiguously
				real *dst_slice = dst + (size_t)(ix);
				THVector_(cadd)(dst_slice, dst_slice, src, 1, nOutputFrame);
			} else {
				for (x = 0; x < nOutputFrame; x++) {
					real *dst_slice = dst + (size_t)(ix + x * dW);
					THVector_(cadd)(dst_slice, dst_slice,
					                src + (size_t)(x), 1, 1);
				}
			}
		}
	}
}

// Unfold `input` into the im2col-style buffer `finput`
// (inputFrameSize x kW x nOutputFrame) so the convolution becomes a bmm.
static void THNN_(unfolded_copy_row)(
	THTensor *finput,
	THTensor *input,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	int64_t k;
	real *input_data = THTensor_(data)(input);
	real *finput_data = THTensor_(data)(finput);

// #pragma omp parallel for private(k)
	for (k = 0; k < inputFrameSize * kW; k++) {
		int64_t c = k / kW;
		int64_t rest = k % kW;
		int64_t kw = rest % kW;  // NOTE(review): rest already < kW, so this equals rest
		int64_t x;
		int64_t ix;
		real *dst = finput_data
		            + c * (kW * nOutputFrame)
		            + kw * (nOutputFrame);
		real *src = input_data + c * (nInputFrame);

		ix = (size_t)(kw);
		if (dW == 1) {
			memcpy(dst, src+(size_t)(ix), sizeof(real) * (nOutputFrame));
		} else {
			for (x = 0; x < nOutputFrame; x++) {
				memcpy(dst + (size_t)(x), src + (size_t)(ix + x * dW),
				       sizeof(real) * 1);
			}
		}
	}
}

// Forward pass for ONE sample: output = weight (bmm) unfold(input) [+ bias].
static void THNN_(TemporalRowConvolution_updateOutput_frame)(
	THTensor *input,
	THTensor *output,
	THTensor *weight,
	THTensor *bias,
	THTensor *finput,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	int64_t i;
	// view output as inputFrameSize x 1 x nOutputFrame for the batched matmul
	THTensor *output3d = THTensor_(newWithStorage3d)(
		output->storage, output->storageOffset,
		inputFrameSize, -1,
		1, -1,
		nOutputFrame, -1);

	THNN_(unfolded_copy_row)(finput, input, kW, dW, padW,
	                         inputFrameSize, nInputFrame, nOutputFrame);

	THTensor_(zero)(output);

	if (bias != NULL) {
		// broadcast the per-feature bias across the output frames
		for (i = 0; i < inputFrameSize; i++)
			THVector_(fill)
			        (output->storage->data + output->storageOffset
			        + output->stride[0] * i,
			        THTensor_(get1d)(bias, i), nOutputFrame);
	}

	THTensor_(baddbmm)(output3d, 1, output3d, 1, weight, finput);

	THTensor_(free)(output3d);
}

// Public forward entry point; handles featFirst transposition and batching.
// fgradInput is unused here but needed for Cuda
void THNN_(TemporalRowConvolution_updateOutput)(
	THNNState *state,
	THTensor *input,
	THTensor *output,
	THTensor *weight,
	THTensor *bias,
	THTensor *finput,
	THTensor *fgradInput,     // unused here but needed for Cuda
	int kW,
	int dW,
	int padW,
	bool featFirst) {

	int ndim = input->nDimension;

	THTensor *tinput;
	if (!featFirst) {
		// bring input to (batch x) feats x seq layout
		tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
		input = THTensor_(newContiguous)(tinput);
	} else {
		input = THTensor_(newContiguous)(input);
	}

	THNN_(TemporalRowConvolution_shapeCheck)(
		state, input, NULL, weight, bias, kW, dW, padW);

	int64_t inputFrameSize = weight->size[0];
	int64_t nInputFrame = input->size[ndim - 1];
	int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

	if (ndim == 2) { /* non-batch mode */

		THTensor_(resize3d)(finput, inputFrameSize, kW, nOutputFrame);
		THTensor_(resize2d)(output, inputFrameSize, nOutputFrame);
		THTensor_(zero)(finput);
		THTensor_(zero)(output);

		THNN_(TemporalRowConvolution_updateOutput_frame)
		        (input, output, weight, bias, finput,
		        kW, dW, padW,
		        inputFrameSize, nInputFrame, nOutputFrame);

	} else {
		int64_t T = input->size[0];
		int64_t t;

		THTensor_(resize4d)(finput, T, inputFrameSize, kW, nOutputFrame);
		THTensor_(resize3d)(output, T, inputFrameSize, nOutputFrame);
		THTensor_(zero)(finput);
		THTensor_(zero)(output);

#pragma omp parallel for private(t)
		for (t = 0; t < T; t++) {
			// each thread works on an independent sample slice
			THTensor *input_t = THTensor_(newSelect)(input, 0, t);
			THTensor *output_t = THTensor_(newSelect)(output, 0, t);
			THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

			THNN_(TemporalRowConvolution_updateOutput_frame)
			        (input_t, output_t, weight, bias, finput_t,
			        kW, dW, padW,
			        inputFrameSize, nInputFrame, nOutputFrame);

			THTensor_(free)(input_t);
			THTensor_(free)(output_t);
			THTensor_(free)(finput_t);
		}
	}

	if (!featFirst) { // NOTE: output will NOT be contiguous in this case
		THTensor_(transpose)(output, output, ndim - 1, ndim - 2);
		THTensor_(free)(tinput);
	}

	THTensor_(free)(input);
}

// Backward (input gradient) for ONE sample:
// fgradInput = weight^T (bmm) gradOutput, then fold back into gradInput.
static void THNN_(TemporalRowConvolution_updateGradInput_frame)(
	THTensor *gradInput,
	THTensor *gradOutput,
	THTensor *weight,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	int64_t inputFrameSize,
	int64_t nInputFrame,
	int64_t nOutputFrame) {

	THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
		gradOutput->storage, gradOutput->storageOffset,
		inputFrameSize, -1,
		1, -1,
		nOutputFrame, -1);

	// weight:        inputFrameSize x kW x 1
	// gradOutput3d:  inputFrameSize x 1 x nOutputFrame
	THTensor_(baddbmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput3d);
	// fgradInput:    inputFrameSize x kW x nOutputFrame

	THTensor_(free)(gradOutput3d);

	THTensor_(zero)(gradInput);

	THNN_(unfolded_acc_row)(fgradInput, gradInput,
	                        kW, dW, padW,
	                        inputFrameSize, nInputFrame, nOutputFrame);
}

// Public backward (input gradient) entry point; mirrors updateOutput's
// featFirst/batch handling.  Note the caller-visible `weight` is transposed
// into a temporary before the per-frame kernel is invoked.
void THNN_(TemporalRowConvolution_updateGradInput)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *gradInput,
	THTensor *weight,
	THTensor *finput,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	bool featFirst) {

	int ndim = input->nDimension;

	THTensor *tinput, *tgradOutput;

	if (!featFirst) {
		tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
		tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);

		input = THTensor_(newContiguous)(tinput);
		gradOutput = THTensor_(newContiguous)(tgradOutput);

	} else {
		input = THTensor_(newContiguous)(input);
		gradOutput = THTensor_(newContiguous)(gradOutput);
	}

	THNN_(TemporalRowConvolution_shapeCheck)(state, input, gradOutput, weight,
	                                         NULL, kW, dW, padW);

	int64_t inputFrameSize = weight->size[0];
	int64_t nInputFrame = input->size[ndim - 1];
	int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1;

	THTensor_(resizeAs)(fgradInput, finput);
	THTensor_(resizeAs)(gradInput, input);

	THTensor_(zero)(fgradInput);
	THTensor_(zero)(gradInput);

	THTensor *tweight = THTensor_(new)();
	THTensor_(transpose)(tweight, weight, 1, 2);

	if (ndim == 2) {
		THNN_(TemporalRowConvolution_updateGradInput_frame)
		        (gradInput, gradOutput, tweight, fgradInput,
		        kW, dW, padW,
		        inputFrameSize, nInputFrame, nOutputFrame);
	} else {
		int64_t T = input->size[0];
		int64_t t;

#pragma omp parallel for private(t)
		for (t = 0; t < T; t++) {

			THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
			THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
			THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

			THNN_(TemporalRowConvolution_updateGradInput_frame)
			        (gradInput_t, gradOutput_t, tweight, fgradInput_t,
			        kW, dW, padW,
			        inputFrameSize, nInputFrame, nOutputFrame);

			THTensor_(free)(gradInput_t);
			THTensor_(free)(gradOutput_t);
			THTensor_(free)(fgradInput_t);
		}
	}

	THTensor_(free)(tweight);

	if (!featFirst) { // NOTE: gradInput will NOT be contiguous in this case

		THTensor_(free)(tinput);
		THTensor_(free)(tgradOutput);

		THTensor_(transpose)(gradInput, gradInput, ndim - 1, ndim - 2);
	}

	THTensor_(free)(input);
	THTensor_(free)(gradOutput);

}

// Parameter gradients for ONE sample: gradWeight += scale * gradOutput x
// finput^T; gradBias accumulates the per-feature row sums of gradOutput.
static void THNN_(TemporalRowConvolution_accGradParameters_frame)(
	THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
	THTensor *finput, real scale) {

	int64_t i;
	THTensor *gradOutput3d = THTensor_(newWithStorage3d)(
		gradOutput->storage, gradOutput->storageOffset,
		gradOutput->size[0], -1,
		1, -1,
		gradOutput->size[1], -1);

	THTensor *tfinput = THTensor_(new)();
	THTensor_(transpose)(tfinput, finput, 1, 2);
	// gradOutput3d:  inputFrameSize x 1 x nOutputFrame
	// finput:        inputFrameSize x nOutputFrame x kW
	THTensor_(baddbmm)(gradWeight, 1, gradWeight, scale, gradOutput3d, tfinput);
	// gradWeight:    inputFrameSize x 1 x kW
	THTensor_(free)(tfinput);

	if (gradBias != NULL) {
		for (i = 0; i < gradBias->size[0]; i++) {
			int64_t k;
			real sum = 0;
			real *data = gradOutput3d->storage->data
			             + gradOutput3d->storageOffset
			             + i * gradOutput3d->stride[0];
			for (k = 0; k < gradOutput3d->size[2]; k++) {
				sum += data[k];
			}
			(gradBias->storage->data + gradBias->storageOffset)[i]
			        += scale * sum;
		}
	}

	THTensor_(free)(gradOutput3d);

}

// Public parameter-gradient entry point.  The per-sample loop is sequential
// because gradWeight/gradBias are shared accumulators across samples.
void THNN_(TemporalRowConvolution_accGradParameters)(
	THNNState *state,
	THTensor *input,
	THTensor *gradOutput,
	THTensor *gradWeight,
	THTensor *gradBias,
	THTensor *finput,
	THTensor *fgradInput,
	int kW,
	int dW,
	int padW,
	bool featFirst,
	accreal scale_) {

	real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
	int ndim = input->nDimension;

	THTensor *tinput, *tgradOutput;

	if (!featFirst) {
		tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2);
		tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2);

		input = THTensor_(newContiguous)(tinput);
		gradOutput = THTensor_(newContiguous)(tgradOutput);
	} else {
		input = THTensor_(newContiguous)(input);
		gradOutput = THTensor_(newContiguous)(gradOutput);
	}

	THNN_(TemporalRowConvolution_shapeCheck)
	        (state, input, gradOutput, gradWeight, gradBias, kW, dW, padW);

	if (ndim == 2) {
		THNN_(TemporalRowConvolution_accGradParameters_frame)(
			gradOutput, gradWeight, gradBias, finput, scale);
	} else {
		int64_t T = input->size[0];
		int64_t t;

		for (t = 0; t < T; t++) {
			THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
			THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

			THNN_(TemporalRowConvolution_accGradParameters_frame)(
				gradOutput_t, gradWeight, gradBias, finput_t, scale);

			THTensor_(free)(gradOutput_t);
			THTensor_(free)(finput_t);
		}
	}

	if (!featFirst) {
		THTensor_(free)(tinput);
		THTensor_(free)(tgradOutput);
	}

	THTensor_(free)(input);
	THTensor_(free)(gradOutput);

}

#endif
gp_rank_openmp.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <errno.h> #include <limits.h> #include <string.h> #include <time.h> #include <omp.h> #define GIG 1000000000 #define CPG 2.4 // Cycles per GHz -- Adjust to your computer typedef struct ad_vert { long vertex_num; struct ad_vert *next; }adj_vert_t; typedef struct { double curr_page_rank; double next_page_rank; long num_adj_nodes; adj_vert_t *last_node_addr; void *next; }vertex_t; double epsilon; double rand_hop = 0.15; #define GRAPH_FILE_SEPERATOR " ,;" #define MAX_LINE_LEN 100 #define RAND_HOP_LIKELIHOOD(r_hop_prob, nvert) ((r_hop_prob) / (nvert)) #define TRAV_LIKELIHOOD(r_hop_prob, g, index) ((1 - (r_hop_prob)) * (g)[index].curr_page_rank / (g)[index].num_adj_nodes) #define TRAV_LIKELIHOOD_LEAF(r_hop_prob, g, index) ((1 - (r_hop_prob)) * (g)[index].curr_page_rank / (num_vertices - 1)) struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } long string_to_long(char *str) { long val; char *endptr; errno = 0; val = strtol(str, &endptr, 10); if((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) || (errno != 0 && val == 0) || (endptr == str)) { perror("Error while converting string to long value"); val = -1; } return val; } void initialize_vertices(vertex_t *g, long num_vertices) { long i; omp_set_num_threads(200); #pragma omp parallel for for(i = 0;i < num_vertices;i++) { g[i].curr_page_rank = 1 / (double)num_vertices; g[i].next_page_rank = RAND_HOP_LIKELIHOOD(rand_hop, num_vertices); g[i].num_adj_nodes = 0; g[i].last_node_addr = NULL; g[i].next = NULL; } } int append_node(vertex_t *g, long parent_vertex, long child_vertex, long num_verts) { if(parent_vertex >= num_verts || child_vertex >= num_verts) { printf("Invalid arguments\n"); 
return -1; } adj_vert_t *ptr = (adj_vert_t *)malloc(sizeof(adj_vert_t)); ptr->vertex_num = child_vertex; ptr->next = NULL; if(g[parent_vertex].next == NULL) { g[parent_vertex].next = ptr; g[parent_vertex].last_node_addr = ptr; } else { g[parent_vertex].last_node_addr->next = ptr; g[parent_vertex].last_node_addr = ptr; } g[parent_vertex].num_adj_nodes++; return 0; } void print_converged_pr_vals(vertex_t *g, long num_vertices) { long i; double sum=0; omp_set_num_threads(500); #pragma omp parallel for reduction(+:sum) for(i = 0;i < num_vertices;i++){ printf("Converged page rank for node %lu : %.10f\n",i,g[i].curr_page_rank); sum += g[i].curr_page_rank; } printf("Sum is %f\n",sum); } int main(int argc, char *argv[]) { long i,j; FILE *file; char *token1, *token2; char line[MAX_LINE_LEN]; adj_vert_t *ptr = NULL; double value = 0; double pr_diff; long num_vertices = 0; long pnode, cnode; long iterations=0; vertex_t *graph; struct timespec time_diff; struct timespec diff(struct timespec start, struct timespec end); struct timespec time1, time2; if(argc != 3) return -1; num_vertices = string_to_long(argv[1]); if(num_vertices < 0) return -1; graph = (vertex_t *)malloc(num_vertices * sizeof(vertex_t)); epsilon =(double) 0.000001/num_vertices; if(!graph) return -1; initialize_vertices(graph, num_vertices); file = fopen(argv[2],"r"); if(file) { while (fgets(line, sizeof(line), file)) { token1 = strtok (line,GRAPH_FILE_SEPERATOR); token2 = strtok(NULL,GRAPH_FILE_SEPERATOR); if(token1 == NULL || token2 == NULL || strtok(NULL,GRAPH_FILE_SEPERATOR) != NULL) return -1; pnode = string_to_long(token1); cnode = string_to_long(token2); if(pnode < 0 || cnode < 0) return -1; if(append_node(graph,pnode,cnode,num_vertices)) return -1; } } else return -1; printf("Graph parsing successful\n"); int accum; //Start time clock_gettime(CLOCK_REALTIME, &time1); do { pr_diff = 0; for(i = 0;i < num_vertices;i++) { if(graph[i].next == NULL) { omp_set_num_threads(200); #pragma omp parallel for for(j = 
0;j < num_vertices;j++) { if(j != i){ //accum += TRAV_LIKELIHOOD_LEAF(rand_hop,graph, i); graph[j].next_page_rank += TRAV_LIKELIHOOD_LEAF(rand_hop,graph, i); } //graph[j].next_page_rank = accum; } } else value = TRAV_LIKELIHOOD(rand_hop, graph, i); for(ptr = (adj_vert_t *)graph[i].next;ptr != NULL;ptr = ptr->next){ graph[ptr->vertex_num].next_page_rank += value; } } #pragma omp parallel for reduction(+:pr_diff) for(i = 0;i < num_vertices;i++) { pr_diff += fabsf(graph[i].next_page_rank - graph[i].curr_page_rank); graph[i].curr_page_rank = graph[i].next_page_rank; graph[i].next_page_rank = RAND_HOP_LIKELIHOOD(rand_hop,num_vertices); } #pragma omp barrier //printf("Diff : %f\n",pr_diff); iterations++; }while(pr_diff > epsilon); //End Time clock_gettime(CLOCK_REALTIME, &time2); time_diff=diff(time1,time2); //print_converged_pr_vals(graph,num_vertices); printf("Number of iterations: %lu\n",iterations); printf("Number of Cycles: %ld\n", (long int)((double)(CPG)*(double) (GIG * time_diff.tv_sec + time_diff.tv_nsec))); return 0; }
GB_binop__second_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint8) // A*D function (colscale): GB (_AxD__second_uint8) // D*A function (rowscale): GB (_DxB__second_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__second_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__second_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint8) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = bij #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] 
#define GB_GETA(aij,Ax,pA,A_iso) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = y ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 1 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT8 || GxB_NO_SECOND_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__second_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__second_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__second_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__second_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__second_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__second_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__second_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const 
int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__second_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__second_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__second_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
GB_matvec_type_name.c
//------------------------------------------------------------------------------ // GB_matvec_type_name: return the name of the type of a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GB_matvec_type_name // return the name of the type of a matrix ( char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). const GrB_Matrix A, // matrix to query GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_RETURN_IF_NULL (type_name) ; ASSERT_MATRIX_OK (A, "A for type_name", GB0) ; //-------------------------------------------------------------------------- // return the type //-------------------------------------------------------------------------- memcpy (type_name, A->type->name, GxB_MAX_NAME_LEN) ; #pragma omp flush return (GrB_SUCCESS) ; }
main.c
// ! Requirements
// - Execute the multiplication 15 times,
// - Run the code 5 times for each execution
// - Manage double precision for all the arrays

#include <omp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define MAX_INPUT_SIZE 100
#define CALCULATIONS 15
#define EXECUTIONS 5

/* Elapsed time between two clock() readings, in milliseconds.
 * NOTE(review): clock() measures CPU time, not wall time; under OpenMP the
 * CPU time of all threads is summed, so parallel speedup is understated. */
long timediff(clock_t t1, clock_t t2)
{
    long elapsed;
    elapsed = ((double)t2 - t1) / CLOCKS_PER_SEC * 1000;
    return elapsed;
}

/* A dense row-major matrix (B is stored transposed by fillMatrix). */
typedef struct {
    char *name;
    size_t columns;
    size_t rows;
    double *data;
} Matrix;

typedef struct {
    double timeTaken;   /* milliseconds for CALCULATIONS multiplications */
} Execution_Metric;

/* Reference result produced by the serial run; parallel runs are checked
 * against it. */
Matrix *serialC;

/* Snapshot c into the global serialC so later runs can be verified. */
void copySerialC(Matrix *c)
{
    serialC = malloc(sizeof(Matrix));
    if (serialC == NULL) {
        printf("Couldn't separate the specified memory. Exiting...");
        exit(1);
    }
    serialC->name = NULL;   /* never set before; left dangling previously */
    serialC->columns = c->columns;
    serialC->rows = c->rows;
    const size_t matrixBytes = sizeof(double) * serialC->columns * serialC->rows;
    serialC->data = malloc(matrixBytes);
    if (serialC->data == NULL) {
        printf("Couldn't separate the specified memory. Exiting...");
        exit(1);
    }
    memcpy(serialC->data, c->data, matrixBytes);
}

/* Compare mat element-wise against the serial reference and abort on any
 * mismatch. Exact == comparison is intentional: the parallel kernel performs
 * the same operations in the same order per element. */
void verifyResultMat(Matrix *mat)
{
    printf("VERIFYING RESULTS...\n");
    /* Loop counters were previously uninitialized (undefined behavior), and
     * the flat index used rows * mat->rows instead of rows * mat->columns. */
    for (size_t rows = 0; rows < mat->rows; rows++)
        for (size_t columns = 0; columns < mat->columns; columns++) {
            size_t index = rows * mat->columns + columns;
            if (mat->data[index] != serialC->data[index]) {
                printf("ERROR: Results are not the same\n");
                exit(1);
            }
        }
    printf("RESULTS VERIFIED! They are the same!\n");
}

/* Print one timing per execution, in ms. */
void printMetrics(Execution_Metric *metrics)
{
    for (int i = 0; i < EXECUTIONS; i++) {
        printf("%lf\n", metrics[i].timeTaken);
    }
}

/* Dump a matrix row by row (debug helper). */
void printMatrix(Matrix *mat)
{
    printf("PRINTING MATRIX %s\n", mat->name);
    /* Loop counters were previously uninitialized (undefined behavior). */
    for (size_t rows = 0; rows < mat->rows; rows++) {
        for (size_t columns = 0; columns < mat->columns; columns++)
            printf("%lf ", *(mat->data + rows * mat->columns + columns));
        printf("\n");
    }
}

/* Read one whitespace-delimited token from stdin into a fresh buffer.
 * Caller frees. */
char *getInput()
{
    char *input = malloc(MAX_INPUT_SIZE);
    if (input == NULL) {
        printf("Couldn't separate the specified memory. Exiting...");
        exit(1);
    }
    memset(input, '\0', MAX_INPUT_SIZE);
    scanf("%99s", input);   /* bounded: %s alone can overflow the buffer */
    return input;
}

/* Ask the user for the dimensions of matrix matName and allocate it. */
Matrix *getMatrixInfo(char *matName)
{
    Matrix *mat = malloc(sizeof(Matrix));
    if (mat == NULL) {
        printf("Couldn't separate the specified memory. Exiting...");
        exit(1);
    }
    printf("How many rows does Matrix %s have? ", matName);
    scanf("%zu", &mat->rows);
    printf("How many columns does Matrix %s have? ", matName);
    scanf("%zu", &mat->columns);
    mat->data = malloc(sizeof(double) * mat->rows * mat->columns);
    mat->name = malloc(strlen(matName) + 1);
    if (mat->data == NULL || mat->name == NULL) {
        printf("Couldn't separate the specified memory. Exiting...");
        exit(1);
    }
    memcpy(mat->name, matName, strlen(matName) + 1);
    return mat;
}

/* Release a matrix and everything it owns. */
void freeMatrix(Matrix *mat)
{
    free(mat->name);    /* was leaked before */
    free(mat->data);
    free(mat);
}

/* Fill mat from a user-named text file of one value per line. When
 * readTanspose is set the values are stored column-major, i.e. the matrix
 * is kept transposed in memory (used for B to get unit-stride dot products). */
void fillMatrix(Matrix *mat, bool readTanspose)
{
    printf("What is the fileName for matrix %s ", mat->name);
    char *fileName = getInput();
    FILE *file = fopen(fileName, "r");
    if (file == NULL) {
        printf("Error opening file!\n");
        exit(1);
    }
    double tempFloat;
    for (size_t i = 0; i < mat->rows; i++) {
        for (size_t j = 0; j < mat->columns; j++) {
            if (fscanf(file, "%lf\n", &tempFloat) != 1) {
                printf("The specified dimensions aren't met!\n");
                exit(1);
            }
            if (readTanspose)
                *(mat->data + (j * mat->rows) + i) = tempFloat;
            else
                *(mat->data + (i * mat->columns) + j) = tempFloat;
        }
    }
    if (!feof(file)) {
        printf("The specified dimensions aren't met");
        exit(1);
    }
    fclose(file);
    free(fileName);
}

/* c = a * b, single thread. b is stored transposed, hence the
 * b->data[j * a->columns + k] indexing. */
void serialProcedure(Matrix *a, Matrix *b, Matrix *c)
{
    for (size_t i = 0; i < c->rows; i++)
        for (size_t j = 0; j < c->columns; j++) {
            c->data[i * c->columns + j] = 0;
            for (size_t k = 0; k < a->columns; k++)
                c->data[i * c->columns + j] +=
                    a->data[i * a->columns + k] * b->data[j * a->columns + k];
        }
}

/* c = a * b with the outer loop split across OpenMP threads. Rows of c are
 * disjoint per thread, so no synchronization is needed. */
void ompProcedure(Matrix *a, Matrix *b, Matrix *c)
{
#pragma omp parallel for
    for (size_t i = 0; i < c->rows; i++)
        for (size_t j = 0; j < c->columns; j++) {
            c->data[i * c->columns + j] = 0;
            for (size_t k = 0; k < a->columns; k++)
                c->data[i * c->columns + j] +=
                    a->data[i * a->columns + k] * b->data[j * a->columns + k];
        }
}

/* Print per-run timings, their averages, and the parallel/serial ratio. */
void printTable(Execution_Metric *a, Execution_Metric *b)
{
    printf("Run#\t|Serial\t\t|Parallel1\t\t\n");
    double avg[2] = {0, 0};
    for (int i = 0; i < EXECUTIONS; i++) {
        avg[0] += a[i].timeTaken;
        avg[1] += b[i].timeTaken;
        printf("%d\t|%lf\t|%lf\n", i + 1, a[i].timeTaken, b[i].timeTaken);
    }
    /* The "promedio" (average) row used to print the raw sums. */
    avg[0] /= EXECUTIONS;
    avg[1] /= EXECUTIONS;
    printf("promedio|%lf\t|%lf\n", avg[0], avg[1]);
    printf("%% vs serial|\t\t|%lf\n", avg[1] / avg[0]);
}

/* Time EXECUTIONS runs of CALCULATIONS multiplications each with f. */
void calculateMetrics(Execution_Metric *metrics,
                      void (*f)(Matrix *, Matrix *, Matrix *),
                      Matrix *a, Matrix *b, Matrix *c)
{
    int iteration = 0, calculation;
    clock_t start, end;
    while (iteration < EXECUTIONS) {
        calculation = 0;
        start = clock();
        while (calculation < CALCULATIONS) {
            f(a, b, c);
            calculation++;
        }
        end = clock();
        metrics[iteration].timeTaken = timediff(start, end);
        iteration++;
    }
}

/* Validate a x b dimensions and allocate the result matrix. */
Matrix *getResultMatrixSpecifications(Matrix *a, Matrix *b)
{
    if (a->columns != b->rows) {
        printf("Can't compute the matrix multilication (The columns of A must be the same as the rows of B)");
        exit(1);
    }
    Matrix *c = malloc(sizeof(Matrix));
    if (c == NULL) {
        printf("Couldn't separate the specified memory. Exiting...\n");
        exit(1);
    }
    c->name = NULL;
    c->rows = a->rows;
    c->columns = b->columns;
    c->data = malloc(sizeof(double) * c->rows * c->columns);
    if (c->data == NULL) {
        printf("Couldn't separate the specified memory. Exiting...\n");
        exit(1);
    }
    return c;
}

/* Write the result matrix, one value per line. */
void writeMultiplicationRes(Matrix *c)
{
    FILE *file = fopen("matrixC.txt", "w");
    if (file == NULL) {
        printf("Error opening file!\n");
        exit(1);
    }
    for (size_t i = 0; i < c->rows; i++)
        for (size_t j = 0; j < c->columns; j++)
            fprintf(file, "%0.10lf\n", c->data[i * c->columns + j]);
    fclose(file);
}

int main(void)  /* was non-standard void main() */
{
    Matrix *a = getMatrixInfo("a");
    fillMatrix(a, false);
    Matrix *b = getMatrixInfo("b");
    fillMatrix(b, true);    /* b is read transposed for cache-friendly access */
    Matrix *c = getResultMatrixSpecifications(a, b);
    /* Swap b's logical dimensions to match its transposed storage layout. */
    size_t temp = b->columns;
    b->columns = b->rows;
    b->rows = temp;

    Execution_Metric Serial_Metrics[EXECUTIONS];
    Execution_Metric OMP_Metrics[EXECUTIONS];

    calculateMetrics(Serial_Metrics, serialProcedure, a, b, c);
    writeMultiplicationRes(c);
    copySerialC(c);
    calculateMetrics(OMP_Metrics, ompProcedure, a, b, c);
    verifyResultMat(c);
    printTable(Serial_Metrics, OMP_Metrics);

    freeMatrix(a);
    freeMatrix(b);
    freeMatrix(c);
    freeMatrix(serialC);
    return 0;
}
2d_simple_v1.c
#include <stdio.h>   /* was missing: printf was implicitly declared */
#include <stdlib.h>
#include <omp.h>

/* Allocate a 1x1 "2D" array on the heap, have every OpenMP thread store and
 * print the value 42, then free the array. */
int main(void)
{
    int **arr = malloc(sizeof *arr);
    if (arr == NULL)
        return EXIT_FAILURE;
    arr[0] = malloc(sizeof *arr[0]);
    if (arr[0] == NULL) {
        free(arr);
        return EXIT_FAILURE;
    }

    #pragma omp parallel
    {
        /* NOTE(review): every thread writes the same value 42 and then reads
         * it, so the race is value-benign here, but it is still a data race
         * under the C/OpenMP memory model -- confirm this is intentional. */
        arr[0][0] = 42;
        printf("%d\n", arr[0][0]);
    }

    free(arr[0]);
    free(arr);
    return 0;
}
bfm_evo.h
/* -*- mode:c++; c-basic-offset:2 -*- */ /****************************************************************************/ /* Aug 2012 */ /* Hantao Yin */ /* */ /* bfm_evo.h */ /* functions added by Hantao (mainly DWF-like fermion evolution related). */ /* */ /****************************************************************************/ #ifndef INCLUDED_BFM_EVO_HT_H #define INCLUDED_BFM_EVO_HT_H #include <stdio.h> #include <bagel_int.h> #include <bfm.h> #include <bfm_qdp.h> #include <omp.h> #include <math.h> #include <vector> #include <util/gjp.h> #include "bfm_evo_aux.h" enum { Export = 0, Import = 1 }; // FIXME: it inherits from bfm_qdp for the sole reason of using its // importGauge() function. I'm too lazy to do any manual // shifts/conjugates ...... template <class Float> class bfm_evo : public bfm_qdp<Float> { public: // BFM has this // enum {Even = 0, Odd}; integer cps_idx_cb(int x[4], int s, int reim, int i, int i_size); integer cps_idx_cb_gparity(int x[4], int s, int reim, int i, int i_size, int flav); // s outer most integer cps_idx(int x[4], int s, int reim, int i, int i_size); integer cps_idx_gparity(int x[4], int s, int reim, int i, int i_size, int flav); // s inner most (but outside color and spin) integer cps_idx_s(int x[4], int s, int reim, int i, int i_size); // index for 4d fermion field integer cps_idx_4d (int x[4], int reim, int i, int i_size); integer cps_idx_s_gparity(int x[4], int s, int reim, int i, int i_size, int flav); // compute the vector pair (v1, v2) needed to calculate fermion force. void calcMDForceVecs(Fermion_t v1[2], Fermion_t v2[2], Fermion_t phi1, Fermion_t phi2); void Booee(Fermion_t psi, Fermion_t chi, int dag); // Ritz method used to compute the maximum/minimum eigenvalue of M^\dag M. // Use algorithm presented in arXiv: hep-lat/9507023. // // If compute_min == true then we compute the minmum eigenvalue of // M^\dag M, otherwise we compute the maximum eigenvalue, i.e. the // negative of the minimum eigenvalue of -M^\dag M. 
double ritz(Fermion_t x, int compute_min); // solve a propagator, for HtCayleyTanh this is just unpreconditioned CG // for HmCayleyTanh this is D^{-1} Dminus acting on in[2]. // int prop_solve(Fermion_t out[2], Fermion_t in[2]); // ====================================================================== // these functions need to be rewritten to fit into bfm style. // currently they contain both bfm and CPS style gauge/fermion fields. #if 0 //testing private: #endif // auxiliary functions used by compute_force(). //CK: gpf1_offset_p is the offset to reach the second G-parity flavour in the vectors v1p and v2p. // Its value depends on whether v1p/v2p are internal vectors (24*5dvol) or in the buffer send // from the next node (24*Ls*3dsurfvol where 3dsurfvol is the 3d surface volume in the comms direction) void fforce_site(Float *mom, Float *gauge, Float *v1, Float *v1p, Float *v2, Float *v2p, int mu, Float coef, int gpf1_offset_p = 0); void fforce_internal(Float *mom, Float *gauge, Float *v1, Float *v2, // internal data Float coef, int mu, int me, int nthreads); void fforce_surface(Float *mom, Float *gauge, Float *v1, Float *v2, // internal data Float *v1_s, Float *v2_s, // surface data Float coef, int mu); void copySendFrmData(Float v3d[], Float v4d[], int mu, bool send_neg); // complex version of axpy() void axpy_c(Fermion_t r, Fermion_t x, Fermion_t y, std::complex<double> a, Fermion_t tmp) { printf("void axpy_c temporarily disabled\n"); exit(-1); #if 0 this->zaxpy(r, x, y, a); #endif } public: void thread_work_partial_nobarrier(int nwork, int me, int nthreads, int &mywork, int &myoff) { int basework = nwork / nthreads; int backfill = nthreads - (nwork % nthreads); mywork = (nwork + me) / nthreads; myoff = basework * me; if ( me > backfill ) myoff += (me-backfill); } // compute fermion force: // // mom += coef * (phiL^\dag e_i(M) \phiR + \phiR^\dag e_i(M^\dag) \phiL) // // For BFM M is M = M_oo - M_oe M^{-1}_ee M_eo void compute_force(Float *mom, Float *gauge, 
Fermion_t phiL, Fermion_t phiR, double coef); #if 0 //CHECK CODE template<typename FloatEXT> void thread_impexFermion_s_test(FloatEXT *psi, Fermion_t handle[2], int doimport); #endif // psi assumes the following order: (color, spin, s, x, y, z, t), // mainly used to import/export the "v1" and "v2" vectors in evolution. template<typename FloatEXT> void thread_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport); Float *threadedAllocFloat(size_t size, int mem_type=mem_slow); void threadedFreeFloat(Float *); // bicg_M: Biconjugate gradient method on preconditioned Dirac // operator (It never converges). // // FIXME: test code only, don't use it unless you know what you are // doing. int bicg_M(Fermion_t sol, Fermion_t src); // bicgstab_M: Biconjugate gradient stabilized method on // preconditioned Dirac operator. // // FIXME: test code only, don't use it unless you know what you are // doing. int bicgstab_M(Fermion_t sol, Fermion_t src); // GCR, solves M x = b int gcr_M(Fermion_t sol, Fermion_t src); // GMRES(m) solves M x = b. // // Restarts after m iterations. int gmres_M(Fermion_t sol, Fermion_t src, const int m); public: //====================================================================== // the following member functions are single-threaded functions: // ====================================================================== // psi assumes 5D even/odd preconditioned order: (color, spin, x, y, z, t, s)/2 template<typename FloatEXT> void cps_impexcbFermion(FloatEXT *psi, Fermion_t handle, int doimport, int cb); // psi assumes regular canonical order: (color, spin, x, y, z, t, s) template<typename FloatEXT> void cps_impexFermion(FloatEXT *psi, Fermion_t handle[2], int doimport); // psi assumes the following order: (color, spin, s, x, y, z, t), // mainly used to import/export the "v1" and "v2" vectors in evolution. 
template<typename FloatEXT>
void cps_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport);

// template<typename FloatEXT>
// void cps_importGauge(FloatEXT *importme);

// Imports a 4D CPS fermion to a 5D BFM fermion, putting the left-handed
// part at s=0 and the right-handed part at s=Ls-1. (Or does the inverse,
// exporting a 5D BFM fermion to a 4D CPS fermion).
// psi assumes regular canonical order: (color, spin, x, y, z, t)
template < typename FloatEXT >
void cps_impexFermion_4d (FloatEXT * psi, Fermion_t handle[2], int doimport, bool prezero = true)
// Imports a 4D CPS fermion to a 5d BFM fermion, putting the left-handed
// part at s=0 and the right-handed part at s=Ls-1. (Or does the inverse,
// exporting a 5D BFM fermion to a 4D CPS fermion).
//template < class Float > template < typename FloatEXT >
// void bfm_evo < Float >::cps_impexFermion_4d (FloatEXT * psi,
// Fermion_t handle[2],
// int doimport, bool prezero)
{
  if (doimport && prezero) {
#pragma omp parallel
    {
      // zero out 5d bulk since we only import to the walls
      this->set_zero (handle[Even]);
      this->set_zero (handle[Odd]);
    }
  }

  int Nspinco = 12;                 // spin * color components per site
  int i_inc = this->simd () * 2;    // bagel stride between spin-color components
  int vol4d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3];
  omp_set_num_threads (this->nthread);
  Float *bagel[2] = { (Float *) handle[0], (Float *) handle[1] };

#pragma omp parallel for
  for (int site = 0; site < vol4d; site++) {
    // decompose linear site index into 4d coordinates
    int x[4];
    int si = site;
    x[0] = si % this->node_latt[0];
    si = si / this->node_latt[0];
    x[1] = si % this->node_latt[1];
    si = si / this->node_latt[1];
    x[2] = si % this->node_latt[2];
    si = si / this->node_latt[2];
    x[3] = si % this->node_latt[3];

    int bidx_base_left = this->bagel_idx5d (x, 0, 0, 0, Nspinco, 1);
    int bidx_base_right = this->bagel_idx5d (x, this->Ls - 1, 0, 0, Nspinco, 1);
    int cidx_base = this->cps_idx_4d (x, 0, 0, Nspinco);

    for (int co = 0; co < Nspinco; co++) {
      // right-handed components are first six spin-color components
      // left-handed components are last six spin-color components
      int bidx_base;
      int s;
      if (co < 6) {
        bidx_base = bidx_base_right;
        s = this->Ls - 1;
      } else {
        bidx_base = bidx_base_left;
        s = 0;
      }
      // s contributes to the checkerboard only under 5d preconditioning
      int sp = this->precon_5d ? s : 0;
      int cb = (x[0] + x[1] + x[2] + x[3] + sp) & 0x1;
      for (int reim = 0; reim < 2; reim++) {
        int bidx = bidx_base + reim + co * i_inc;
        int cidx = cidx_base + reim + co * 2;
        if (doimport)
          bagel[cb][bidx] = psi[cidx];
        else
          psi[cidx] = bagel[cb][bidx];
      }
    } //co, reim
  } //xyzts
}

template < typename FloatEXT > void cps_importGauge (FloatEXT * importme);

#if 0
//CK: Appears to assume 'importme' is in canonical ordering
//template <class Float> template<typename FloatEXT>
//void bfm_evo<Float>::cps_importGauge(FloatEXT *importme) {
  int u_sz = Nd;
  if(cps::GJP.Gparity()) u_sz *= 2; //U* fields are stacked on second set of Nd LatticeColorMatrix objects in the array

  multi1d<LatticeColorMatrix> U(u_sz);
  omp_set_num_threads(this->nthread);

  int Ndircoco = 72;
  int Ncoco = 9;
  QDPdouble *U_p;

  int vol4d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3];
  assert (vol4d>0 );

  for (int muu=0;muu<u_sz;muu++) {
    U_p = (QDPdouble *)&(U[muu].elem(0).elem());
    int flav = muu / Nd;
    int mu = muu % Nd;
#pragma omp parallel for
    for (int site=0;site<vol4d;site++ ) {
      int x[4];
      int s=site;
      x[0]=s%this->node_latt[0];    s/=this->node_latt[0];
      x[1]=s%this->node_latt[1];    s/=this->node_latt[1];
      x[2]=s%this->node_latt[2];    s/=this->node_latt[2];
      x[3]=s%this->node_latt[3];

      int qidx_base = this->chroma_idx(x, 0, 0, Ncoco);
      for(int coco = 0; coco < Ncoco; ++coco) {
        for ( int reim = 0; reim < 2; ++reim) {
          int qidx = qidx_base + reim + coco * 2;
          int siteoff = mu + Nd * site + flav*Nd*vol4d; //Second G-parity flavour offset by Nd*vol4d
          int cidx = reim + 2 * (coco + Ncoco * siteoff);
          U_p[qidx] = importme[cidx];
        }} // reim,coco
    } // x
  }//mu

  // if(this->isBoss()) printf("before importGauge\n");
  // to bfm
  this->importGauge (U);
  // if(this->isBoss()) printf("after importGauge\n");
}
#endif

//EigCG
#if 0 //THESE ARE IN BFM
  Fermion_t allocCompactFermion (int mem_type=mem_slow);
  Fermion_t threadedAllocCompactFermion (int mem_type=mem_slow);
  void* threaded_alloc(int length, int mem_type=mem_slow);
  void threaded_free(void *handle);
#endif

int EIG_CGNE_M(Fermion_t solution[2], Fermion_t source[2]);
int Eig_CGNE_prec(Fermion_t psi, Fermion_t src);

#if 0 //CK: leaving them in BFM
// copied from Jianglei's bfm
double CompactMprec(Fermion_t compact_psi,
                    Fermion_t compact_chi,
                    Fermion_t psi,
                    Fermion_t chi,
                    Fermion_t tmp,
                    int dag,int donrm=0) ;

// copied from Jianglei's bfm
void CompactMunprec(Fermion_t compact_psi[2],
                    Fermion_t compact_chi[2],
                    Fermion_t psi[2],
                    Fermion_t chi[2],
                    Fermion_t tmp,
                    int dag);
#endif

// do deflation using eigenvectors/eigenvalues from Rudy's Lanczos code.
void deflate(Fermion_t out, Fermion_t in, const multi1d<Fermion_t [2]> *evec, const multi1d<Float> *eval, int N);

void set_mass (double mass);

//#ifdef USE_NEW_BFM_GPARITY
#if 1
// forwards to the real/imaginary-component variant in the base class
inline void axpby_ssp_proj(Fermion_t out, std::complex<double> a,Fermion_t x, std::complex<double> b,Fermion_t y,int sxo,int sy,int psign){
  this->axpby_ssp_proj_complex(out,a.real(),a.imag(),x,b.real(),b.imag(),y,sxo,sy,psign);
}
#endif
};

// Simple utility function to set the mass and reinit if necessary.
template < class Float > void bfm_evo < Float >::set_mass (double mass) { if (this->mass != mass) { this->mass = mass; this->GeneralisedFiveDimEnd (); this->GeneralisedFiveDimInit (); } } //CK: this function gives the offset within a checkerboarded vector template<class Float> integer bfm_evo<Float>::cps_idx_cb(int x[4], int s, int reim, int i, int i_size) { // int cb = ( x[0]+x[1]+x[2]+x[3] )&0x1; int csite =x[0] +this->node_latt[0] *(x[1] + this->node_latt[1] *(x[2] +this->node_latt[2] *(x[3] +s*this->node_latt[3]))); csite /= 2; // int cbvol = (this->node_latt[0]* // this->node_latt[1]* // this->node_latt[2]* // this->node_latt[3]* // this->cbLs)/2; // return (cb*cbvol+csite)*i_size*2 + i*2 + reim; return csite*i_size*2 + i*2 + reim; } //For G-parity the WILSON layout is 5d preconditioned //| s=0 | s=1 | ......... | s = 0 |..... //| odd f0 | odd f1 | even f0 | even f1 | ......... | even f0 | even f1 |..... //where the blocks on the lowest line have their *4d* parity indicated. (5d parity) = [(4d parity) + s] % 2 //hence the first half of the full WILSON vector had 5d parity odd, and the second half 5d parity even template<class Float> integer bfm_evo<Float>::cps_idx_cb_gparity(int x[4], int s, int reim, int i, int i_size, int flav) { int s_off = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; //2 4D half-volumes, one for each flavour int f_off = s_off/2; int csite =x[0] +this->node_latt[0] *(x[1] + this->node_latt[1] *(x[2] +this->node_latt[2] * x[3])); csite /= 2; csite += flav * f_off + s*s_off; return csite*i_size*2 + i*2 + reim; } template<class Float> integer bfm_evo<Float>::cps_idx(int x[4], int s, int reim, int i, int i_size) { int csite = x[0] + this->node_latt[0] *(x[1] + this->node_latt[1] *(x[2] +this->node_latt[2] *(x[3] +s*this->node_latt[3]))); return (csite*i_size + i)*2 + reim; } template<class Float> integer bfm_evo<Float>::cps_idx_gparity(int x[4], int s, int reim, int i, int i_size, int flav) { //For G-parity 
we have 2 flavours on each s-slice int s_off = 2*this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; //2 4D volumes, one for each flavour int f_off = s_off/2; int csite = x[0] + this->node_latt[0] *(x[1] + this->node_latt[1] *(x[2] +this->node_latt[2]*x[3])); csite += s*s_off + flav * f_off; return (csite*i_size + i)*2 + reim; } template<class Float> integer bfm_evo<Float>::cps_idx_s(int x[4], int s, int reim, int i, int i_size) { int csite = s + this->Ls *(x[0] + this->node_latt[0] *(x[1] + this->node_latt[1] *(x[2] +this->node_latt[2] *x[3]))); return (csite*i_size + i)*2 + reim; } template < class Float > integer bfm_evo < Float >::cps_idx_4d (int x[4], int reim, int i, int i_size) { int csite = x[0] + this->node_latt[0] * (x[1] + this->node_latt[1] * (x[2] + this->node_latt[2] * (x[3]))); return (csite * i_size + i) * 2 + reim; } template<class Float> integer bfm_evo<Float>::cps_idx_s_gparity(int x[4], int s, int reim, int i, int i_size, int flav) { //This s-inner mapping is new here. Offset the second flavour by 1 5D volume, just like in bfm int f_off = this->Ls * this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; int csite = s + this->Ls *(x[0] + this->node_latt[0] *(x[1] + this->node_latt[1] *(x[2] +this->node_latt[2] *x[3]))); csite += flav * f_off; return (csite*i_size + i)*2 + reim; } //CK: Note if the BFM preconditioning is 4D then the 4D checkerboard of the imported field will be the opposite of the 5D checkerboard of the CPS field! cb is the output checkerboard. //The set of all sites with x+y+z+t+s odd is the same as the set of sites with x+y+z+t even, and vice versa. 
template <class Float> template<typename FloatEXT> void bfm_evo<Float>::cps_impexcbFermion(FloatEXT *psi, Fermion_t handle, int doimport, int cb) { int Nspinco=12; int i_inc = this->simd() * 2; int vol5d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3] * this->Ls; Float *bagel = (Float *)handle; omp_set_num_threads(this->nthread); int work = vol5d; if(cps::GJP.Gparity()) work*=2; #pragma omp parallel for for (int sf = 0; sf < work; sf++) { int flav = sf; int site = flav % vol5d; flav /= vol5d; int x[4], s; int si=site; x[0]=si%this->node_latt[0]; si=si/this->node_latt[0]; x[1]=si%this->node_latt[1]; si=si/this->node_latt[1]; x[2]=si%this->node_latt[2]; si=si/this->node_latt[2]; x[3]=si%this->node_latt[3]; s =si/this->node_latt[3]; int sp = this->precon_5d ? s : 0; if ( (x[0]+x[1]+x[2]+x[3] + (sp &0x1)) == cb ) { int bidx_base; int cidx_base; #ifdef BFM_GPARITY bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1,flav); cidx_base = cps::GJP.Gparity() ? this->cps_idx_cb_gparity(x, s, 0, 0, Nspinco, flav) : this->cps_idx_cb(x, s, 0, 0, Nspinco); #else bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1); cidx_base = this->cps_idx_cb(x, s, 0, 0, Nspinco); #endif for ( int co=0;co<Nspinco;co++ ) { for ( int reim=0;reim<2;reim++ ) { // int bidx = bagel_idx(x, reim, co + Nspinco * (s / 2), Nspinco * this->cbLs, 1); // int bidx = this->bagel_idx5d(x, s, reim, co, Nspinco, 1); // int cidx = cps_idx_cb(x, s, reim, co, Nspinco); int bidx = bidx_base + reim + co * i_inc; int cidx = cidx_base + reim + co * 2; if ( doimport ) bagel[bidx] = psi[cidx]; else psi[cidx] = bagel[bidx] ; }}//co,reim }//cb }//xyzts } //Convert a bfm-style Fermion_t pair to or from a CANONICAL format CPS-style fermion //if doimport == 0 psi is the output and handle the input //if doimport == 1 handle is the output and psi the input template <class Float> template<typename FloatEXT> void bfm_evo<Float>::cps_impexFermion(FloatEXT *psi, Fermion_t handle[2], int doimport) 
{ int Nspinco=12; int i_inc = this->simd() * 2; int vol5d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3] * this->Ls; omp_set_num_threads(this->nthread); Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] }; int work = vol5d; if(cps::GJP.Gparity()) work*=2; #pragma omp parallel for for (int sf = 0; sf < work; sf++) { int flav = sf; int site = flav % vol5d; flav /= vol5d; int x[4], s; int si=site; x[0]=si%this->node_latt[0]; si=si/this->node_latt[0]; x[1]=si%this->node_latt[1]; si=si/this->node_latt[1]; x[2]=si%this->node_latt[2]; si=si/this->node_latt[2]; x[3]=si%this->node_latt[3]; s =si/this->node_latt[3]; int sp = this->precon_5d ? s : 0; int cb = x[0]+x[1]+x[2]+x[3]+sp &0x1; int bidx_base; int cidx_base; #ifdef BFM_GPARITY bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1, flav); cidx_base = cps::GJP.Gparity() ? this->cps_idx_gparity(x, s, 0, 0, Nspinco, flav) : this->cps_idx(x, s, 0, 0, Nspinco); #else bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1); cidx_base = this->cps_idx(x, s, 0, 0, Nspinco); #endif for ( int co=0;co<Nspinco;co++ ) { for ( int reim=0;reim<2;reim++ ) { // int bidx = bagel_idx(x, reim, co + Nspinco * (s / 2), Nspinco * this->cbLs, 1); // int bidx = this->bagel_idx5d(x, s, reim, co, Nspinco, 1); // int cidx = cps_idx(x, s, reim, co, Nspinco); int bidx = bidx_base + reim + co * i_inc; int cidx = cidx_base + reim + co * 2; if ( doimport ) bagel[cb][bidx] = psi[cidx]; else psi[cidx] = bagel[cb][bidx]; }}//co, reim }//xyzts } //Convert a bfm style Fermion_t pair (left,right) to a 's-ordered' fermion //if doimport == 0 the input is handle and the output psi //if doimport == 1 the input is psi and the output handle template <class Float> template<typename FloatEXT> void bfm_evo<Float>::cps_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport) { int Nspinco=12; int i_inc = this->simd() * 2; int vol5d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3] * 
this->Ls; omp_set_num_threads(this->nthread); Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] }; int work = vol5d; if(cps::GJP.Gparity()) work*=2; #pragma omp parallel for for (int sf = 0; sf < work; sf++) { int flav = sf; int site = flav % vol5d; flav /= vol5d; int x[4], s; int si=site; s =si%this->Ls; si=si/this->Ls; x[0]=si%this->node_latt[0]; si=si/this->node_latt[0]; x[1]=si%this->node_latt[1]; si=si/this->node_latt[1]; x[2]=si%this->node_latt[2]; x[3]=si/this->node_latt[2]; int sp = this->precon_5d ? s : 0; int cb = (x[0]+x[1]+x[2]+x[3]+sp) & 0x1; int bidx_base; int cidx_base; #ifdef BFM_GPARITY bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1, flav); cidx_base = cps::GJP.Gparity() ? this->cps_idx_s_gparity(x, s, 0, 0, Nspinco, flav) : this->cps_idx_s(x, s, 0, 0, Nspinco); #else bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1); cidx_base = this->cps_idx_s(x, s, 0, 0, Nspinco); #endif for ( int co=0;co<Nspinco;co++ ) { for ( int reim=0;reim<2;reim++ ) { // int bidx = this->bagel_idx5d(x, s, reim, co, Nspinco, 1); // int cidx = cps_idx_s(x, s, reim, co, Nspinco); int bidx = bidx_base + reim + co * i_inc; int cidx = cidx_base + reim + co * 2; if ( doimport ) bagel[cb][bidx] = psi[cidx]; else psi[cidx] = bagel[cb][bidx]; }}//co, reim }//xyzts } #if 0 //This is check code template <class Float> template<typename FloatEXT> void bfm_evo<Float>::thread_impexFermion_s_test(FloatEXT *psi, Fermion_t handle[2], int doimport) { int Nspinco=12; int i_inc = this->simd() * 2; int vol5d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3] * this->Ls; int me, thrlen, throff; int work = vol5d; if(cps::GJP.Gparity()) work*=2; this->thread_work(work, me, thrlen, throff); Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] }; for (int site = 0; site < thrlen; ++site) { int flav = site + throff; int site = flav % vol5d; flav /= vol5d; int x[4], s; int si=site; s =si%this->Ls; si=si/this->Ls; x[0]=si%this->node_latt[0]; 
si=si/this->node_latt[0]; x[1]=si%this->node_latt[1]; si=si/this->node_latt[1]; x[2]=si%this->node_latt[2]; x[3]=si/this->node_latt[2]; int sp = this->precon_5d ? s : 0; int cb = x[0]+x[1]+x[2]+x[3]+sp & 0x1; int bidx_base; int cidx_base; if(cps::GJP.Gparity()){ bidx_base = this->bagel_gparity_idx5d(x, s, 0, 0, Nspinco, 1, flav); cidx_base = this->cps_idx_s_gparity(x, s, 0, 0, Nspinco, flav); }else{ bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1); cidx_base = this->cps_idx_s(x, s, 0, 0, Nspinco); } for ( int co=0;co<Nspinco;co++ ) { for ( int reim=0;reim<2;reim++ ) { // int bidx = this->bagel_idx5d(x, s, reim, co, Nspinco, 1); // int cidx = cps_idx_s(x, s, reim, co, Nspinco); int bidx = bidx_base + reim + co * i_inc; int cidx = cidx_base + reim + co * 2; if ( doimport ) bagel[cb][bidx] = psi[cidx]; else psi[cidx] = bagel[cb][bidx]; }}//co, reim }//xyzts } #endif //Convert a bfm style Fermion_t pair (left,right) to a 's-ordered' fermion //if doimport == 0 the input is handle and the output psi //if doimport == 1 the input is psi and the output handle template <class Float> template<typename FloatEXT> void bfm_evo<Float>::thread_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport) { int Nspinco=12; int i_inc = this->simd() * 2; int vol5d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3] * this->Ls; int me, thrlen, throff; int work = vol5d; if(cps::GJP.Gparity()) work*=2; this->thread_work(work, me, thrlen, throff); Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] }; for (int sf = 0; sf < thrlen; ++sf) { int flav = sf + throff; int site = flav % vol5d; flav /= vol5d; int x[4], s; int si=site; s =si%this->Ls; si=si/this->Ls; x[0]=si%this->node_latt[0]; si=si/this->node_latt[0]; x[1]=si%this->node_latt[1]; si=si/this->node_latt[1]; x[2]=si%this->node_latt[2]; x[3]=si/this->node_latt[2]; int sp = this->precon_5d ? 
s : 0; int cb = x[0]+x[1]+x[2]+x[3]+sp & 0x1; int bidx_base; int cidx_base; #ifdef BFM_GPARITY bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1, flav); cidx_base = cps::GJP.Gparity() ? this->cps_idx_s_gparity(x, s, 0, 0, Nspinco, flav) : this->cps_idx_s(x, s, 0, 0, Nspinco); #else bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1); cidx_base = this->cps_idx_s(x, s, 0, 0, Nspinco); #endif for ( int co=0;co<Nspinco;co++ ) { for ( int reim=0;reim<2;reim++ ) { // int bidx = this->bagel_idx5d(x, s, reim, co, Nspinco, 1); // int cidx = cps_idx_s(x, s, reim, co, Nspinco); int bidx = bidx_base + reim + co * i_inc; int cidx = cidx_base + reim + co * 2; if ( doimport ) bagel[cb][bidx] = psi[cidx]; else psi[cidx] = bagel[cb][bidx]; }}//co, reim }//xyzts } template <class Float> Float * bfm_evo<Float>::threadedAllocFloat(size_t size, int mem_type) { int me = this->thread_barrier(); void *ret; if ( me == 0 ) { ret = bfm_alloc(size * sizeof(Float), mem_type); } ret = this->thread_bcast(me, ret); this->thread_barrier(); return (Float *)ret; } template <class Float> void bfm_evo<Float>::threadedFreeFloat(Float *f) { int me = this->thread_barrier(); if ( me == 0 ) { bfm_free(f); } this->thread_barrier(); } static inline int idx_4d(const int x[4], const int lx[4]) { int ret = 0; for(int i = 3; i >= 0; --i) { ret = ret * lx[i] + x[i]; } return ret; } static inline int idx_5d(const int x[5], const int lx[5]) { int ret = 0; for(int i = 4; i >= 0; --i) { ret = ret * lx[i] + x[i]; } return ret; } static inline int idx_4d_surf(const int x[4], const int lx[4], int mu) { int ret = 0; for(int i = 3; i >= 0; --i) { if(i == mu) continue; ret = ret * lx[i] + x[i]; } return ret; } static inline int idx_5d_surf(const int x[5], const int lx[5], int mu) { int ret = 0; for(int i = 4; i >= 0; --i) { if(i == mu) continue; ret = ret * lx[i] + x[i]; } return ret; } //CK: Appears to assume 'importme' is in canonical ordering template <class Float> template<typename FloatEXT> void 
bfm_evo<Float>::cps_importGauge(FloatEXT *importme) { int u_sz = Nd; if(cps::GJP.Gparity()) u_sz *= 2; //U* fields are stacked on second set of Nd LatticeColorMatrix objects in the array multi1d<LatticeColorMatrix> U(u_sz); omp_set_num_threads(this->nthread); int Ndircoco=72; int Ncoco = 9; QDPdouble *U_p; int vol4d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; for (int muu=0;muu<u_sz;muu++) { U_p = (QDPdouble *)&(U[muu].elem(0).elem()); int flav = muu / Nd; int mu = muu % Nd; #pragma omp parallel for for (int site=0;site<vol4d;site++ ) { int x[4]; int s=site; x[0]=s%this->node_latt[0]; s/=this->node_latt[0]; x[1]=s%this->node_latt[1]; s/=this->node_latt[1]; x[2]=s%this->node_latt[2]; s/=this->node_latt[2]; x[3]=s%this->node_latt[3]; int qidx_base = this->chroma_idx(x, 0, 0, Ncoco); for(int coco = 0; coco < Ncoco; ++coco) { for ( int reim = 0; reim < 2; ++reim) { // int qidx = this->chroma_idx(x,reim,coco,Ncoco); int qidx = qidx_base + reim + coco * 2; int siteoff = mu + Nd * site + flav*Nd*vol4d; //Second G-parity flavour offset by Nd*vol4d int cidx = reim + 2 * (coco + Ncoco * siteoff); U_p[qidx] = importme[cidx]; }} // reim,coco } // x }//mu // to bfm this->importGauge(U); } //CK: phi1 = Mprec phi2 for fermionic vectors. //Calculates: (odd,even) //v2 = (Boo phi2, Bee MeeInv Meo phi2) //v1 = (phi1, MeeInv^dag Meo^dag phi1) template <class Float> void bfm_evo<Float>::calcMDForceVecs(Fermion_t v1[2], Fermion_t v2[2], Fermion_t phi1, Fermion_t phi2) { // Meo is Wilson D times a matrix (see page 27 in Peter's draft). // Moe/Meo: check bfmbase<Float>::G5D_Meo() in bfmdperp.C. // Mee/Moo: check bfmbase<Float>::G5D_Mooee(). // Mee/Moo inverse: check bfmbase<Float>::G5D_MooeeInv(). 
//2kappa = 1/(5-M5) // v2e = Bee * 2kappa * Meo phi2 this->Meo(phi2, v1[Odd], Even, DaggerNo); //Uses v1[Odd] as temp storage this->MooeeInv(v1[Odd], v1[Even], DaggerNo); this->Booee(v1[Even], v2[Even], DaggerNo); // v2o = Boo phi2 this->Booee(phi2, v2[Odd], DaggerNo); // v1e = 2kappa Meo^dag phi1 this->Meo(phi1, v1[Odd], Even, DaggerYes); this->MooeeInv(v1[Odd], v1[Even], DaggerYes); //CK: For WilsonTM, comparison to CPS version //MooeeInv = 2 kappa g5theta(ctheta,-stheta) //kappa = 1/[2 sqrt( (m+4)^2 + eps^2 )] //ctheta = 2 (m+4) kappa //stheta = 2 eps kappa //g5theta(ctheta,stheta) = ctheta + i stheta g5 // v1o = 1oo phi1 this->copy(v1[Odd], phi1); } template <class Float> void bfm_evo<Float>::Booee(Fermion_t psi, Fermion_t chi, int dag) { int Pminus=-1; int Pplus=1; // just copied the relevant part in G5D_Meo() over. if ( (this->solver == HmCayleyTanh) || (this->solver == HtCayleyTanh) || (this->solver == HwCayleyTanh) || (this->solver == HwCayleyZolo) || (this->solver == HtCayleyZolo) ) { if ( dag ) { // Assemble the 5d matrix for(int s=0;s<this->Ls;s++){ if ( s==0 ) { this->axpby_ssp_proj(chi,this->beo[s],psi, -this->ceo[s+1] ,psi,s,s+1,Pplus); this->axpby_ssp_proj(chi, 1.0,chi,this->mass*this->ceo[this->Ls-1],psi,s,this->Ls-1,Pminus); } else if ( s==(this->Ls-1)) { this->axpby_ssp_proj(chi,this->beo[s],psi,this->mass*this->ceo[0],psi,s,0,Pplus); this->axpby_ssp_proj(chi,1.0,chi,-this->ceo[s-1],psi,s,s-1,Pminus); } else { this->axpby_ssp_proj(chi,this->beo[s],psi,-this->ceo[s+1],psi,s,s+1,Pplus); this->axpby_ssp_proj(chi,1.0 ,chi,-this->ceo[s-1],psi,s,s-1,Pminus); } } } else { // Assemble the 5d matrix for(int s=0;s<this->Ls;s++){ if ( s==0 ) { // chi = bs psi[s] + cs[s] psi[s+1} // chi += -mass*cs[s] psi[s+1} this->axpby_ssp_proj(chi,this->beo[s],psi,-this->ceo[s],psi ,s, s+1,Pminus); this->axpby_ssp_proj(chi,1.0,chi,this->mass*this->ceo[s],psi,s,this->Ls-1,Pplus); } else if ( s==(this->Ls-1)) { 
this->axpby_ssp_proj(chi,this->beo[s],psi,this->mass*this->ceo[s],psi,s,0,Pminus); this->axpby_ssp_proj(chi,1.0,chi,-this->ceo[s],psi,s,s-1,Pplus); } else { this->axpby_ssp_proj(chi,this->beo[s],psi,-this->ceo[s],psi,s,s+1,Pminus); this->axpby_ssp_proj(chi,1.0,chi,-this->ceo[s],psi,s,s-1,Pplus); } } } } else if(this->solver == DWF && this->precon_5d == 1) { // Booee is the identity matrix in this case. this->copy(chi, psi); return; } else if(this->solver == WilsonTM && this->precon_5d ==0){ //CK: I hope this is correct this->copy(chi, psi); return; } else { if ( this->isBoss() ) { printf("Booee: method not implemented for this fermion type / preconditioning type\n"); } exit(-1); } } static inline double quad_solve(double *ct, double *st, double a, double b, double c, double d, double e, double f) { double p = b * (d - f) + e * (c - a); double q = b * (d + f) - e * (c + a); double r = 2 * (c * d - a * f); // solve p + q * cos(2t) + r * sin(2t) = 0 double den = sqrt(q * q + r * r); double ca = q / den; double ci = sqrt(0.5 * (1 + ca)); double si = sqrt(0.5 * (1 - ca)); if(r < 0) si = -si; double cb = -p / den; if(fabs(cb) > 1.) { printf("Panic: cos(psi) > 1\n"); exit(-1); } double cj = sqrt(0.5 * (1 + cb)); double sj = sqrt(0.5 * (1 - cb)); double ct1 = ci * cj + si * sj; double st1 = si * cj - ci * sj; double v1 = (a * ct1 * ct1 + b * st1 * ct1 + c * st1 * st1) / (d * ct1 * ct1 + e * st1 * ct1 + f * st1 * st1); double ct2 = ci * cj - si * sj; double st2 = si * cj + ci * sj; double v2 = (a * ct2 * ct2 + b * st2 * ct2 + c * st2 * st2) / (d * ct2 * ct2 + e * st2 * ct2 + f * st2 * st2); if(v1 < v2) { *ct = ct1; *st = st1; return v1; } else { *ct = ct2; *st = st2; return v2; } } // Ritz method used to compute the maximum/minimum eigenvalue of M^\dag M. // Use algorithm presented in arXiv: hep-lat/9507023. 
template <class Float> double bfm_evo<Float>::ritz(Fermion_t x, int compute_min) { int me = this->thread_barrier(); double stop_rsd = this->residual * this->residual; Fermion_t y = this->threadedAllocFermion(); Fermion_t p = this->threadedAllocFermion(); Fermion_t z = this->threadedAllocFermion(); Fermion_t t = this->threadedAllocFermion(); Fermion_t u = this->threadedAllocFermion(); double mu, pnorm, gnorm2; // normalize x double fact = this->norm(x); fact = sqrt(1./ fact); this->scale(x, fact); if(this->isBoss() && !me) { printf("bfm_evo::ritz <x, x> = %17.10e\n", 1. / (fact * fact)); } // y = A x, A = MdagM or -MdagM mu = this->Mprec(x, t, y, 0, 1); // t = Mpc x (y temp) this->Mprec(t, y, u, 1); //y = Mpc^dag t (u temp) if(! compute_min) { this->scale(y, -1.); //y=-y mu = -mu; } gnorm2 = this->axpy_norm(p, x, y, -mu); //p = -mu * x + y pnorm = sqrt(gnorm2); int i; for(i = 0; i < this->max_iter; ++i) { if(this->isBoss() && !me && i%100==0) { printf("bfm_evo::ritz iter = %6d gnorm2 = %17.10e, targ gnorm2 = %17.10e, mu = %17.10e\n", i, gnorm2, stop_rsd, mu); } if(gnorm2 < stop_rsd) break; // if(i % 100 == 0 && this->isBoss() && !me) { // printf("bfm_evo::ritz iter = %6d gnorm2 = %17.10e, mu = %17.10e\n", i, gnorm2, mu); // } // z = A p double pap = this->Mprec(p, t, z, 0, 1); this->Mprec(t, z, u, 1); if(! compute_min) { this->scale(z, -1.); pap = -pap; } // minimize x cos(theta) + p / pnorm * sin(theta) via theta double d = this->norm(x); double e = 2. * this->inner_real(x, p) / pnorm; double f = 1.; // double a = this->inner_real(x, y); double a = mu * d; double b = 2. * this->inner_real(x, z) / pnorm; double c = pap / (pnorm * pnorm); double ct,st; mu = quad_solve(&ct, &st, a, b, c, d, e, f); this->axpby(x, x, p, ct, st / pnorm); this->axpby(y, y, z, ct, st / pnorm); double gnew = this->axpy_norm(t, x, y, -mu); double beta = ct * gnew / gnorm2; gnorm2 = gnew; // this->axpy(u, x, p, -st * pnorm); // ! 
not stable double xpp = this->inner_real(x, p); this->axpy(u, x, p, -xpp); pnorm = sqrt(this->axpy_norm(p, u, t, beta)); } if(! compute_min) mu = -mu; // check eigenvalue again double xnorm = this->norm(x); double mux = this->Mprec(x, y, t, 0, 1); this->Mprec(y, t, u, 1); double mu_sq = this->norm(t); if(this->isBoss() && !me) { if(i < this->max_iter) { printf("bfm_evo::ritz converged at iteration %d.\n", i); } else { printf("bfm_evo::ritz maximum iteration number reached!\n"); } printf("bfm_evo::ritz ||x|| = %17.10e\n", sqrt(xnorm)); printf("bfm_evo::ritz three ways of computing the eigenvalue should agree.\n"); printf("bfm_evo::ritz eig1 = %17.10e\n", mu / xnorm); printf("bfm_evo::ritz eig2 = %17.10e\n", mux / xnorm); printf("bfm_evo::ritz eig3 = %17.10e\n", sqrt(mu_sq / xnorm)); } this->threadedFreeFermion(y); this->threadedFreeFermion(p); this->threadedFreeFermion(z); this->threadedFreeFermion(t); this->threadedFreeFermion(u); return mu / xnorm; } // FIXME: I'll need to replace getPlusData by something else. // For now it works. #include <comms/scu.h> template <class Float> void bfm_evo<Float>::copySendFrmData(Float v3d[], Float v4d[], int mu, bool send_neg) { int lclx[5] = {this->node_latt[0], this->node_latt[1], this->node_latt[2], this->node_latt[3], this->Ls}; int low[4] = { 0, 0, 0, 0 }; int high[4] = {lclx[0], lclx[1], lclx[2], lclx[3] }; low[mu] = send_neg ? 
0 : lclx[mu] - 1; //pick out the slice at the boundary in the send direction high[mu] = low[mu] + 1; int block_size = 24 * lclx[4]; // s inner most const int hl[4] = {high[0] - low[0], high[1] - low[1], high[2] - low[2], high[3] - low[3] }; const int hl_sites = hl[0] * hl[1] * hl[2] * hl[3]; //3-volume on surface (hl[mu]=1) [in units of blocks of size 24*Ls] const int vol4d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; int me, thrlen, throff; int work = hl_sites; if(cps::GJP.Gparity()) work *=2; this->thread_work(work, me, thrlen, throff); for(int i = 0; i < thrlen; ++i) { int x[4], flav; int tmp = i + throff; //For G-parity, fermion data blocks [each of size 24*Ls] increment in x,y,z,t,flav //Use similar mapping for surface volume, with flav changing slowest (offset 1 * surface 3-volume blocks) flav = tmp/hl_sites; tmp = tmp % hl_sites; x[0] = tmp % hl[0] + low[0]; tmp /= hl[0]; x[1] = tmp % hl[1] + low[1]; tmp /= hl[1]; x[2] = tmp % hl[2] + low[2]; tmp /= hl[2]; x[3] = tmp % hl[3] + low[3]; int off_4d = idx_4d(x, lclx); int off_3d = idx_4d_surf(x, lclx, mu); if(cps::GJP.Gparity()){ //Implement G-parity flavour twist where appropriate. Note that the boundary sign on the boundary between C \bar{u}^T and d fields is implemented on the gauge links //here so we do not need to explicitly apply it to the communicated data. 
if(cps::GJP.Bc(mu) == cps::BND_CND_GPARITY && (send_neg && cps::GJP.NodeCoor(mu) == 0) || (!send_neg && cps::GJP.NodeCoor(mu) == cps::GJP.Nodes(mu)-1) ){ if(flav==0) memcpy(v3d + off_3d * block_size + hl_sites * block_size, v4d + off_4d * block_size, sizeof(Float) * block_size); //d -> CubarT buf else memcpy(v3d + off_3d * block_size, v4d + off_4d * block_size + vol4d * block_size, sizeof(Float) * block_size); //CubarT -> d buf }else{ //copy both flavours to their respective buffers memcpy(v3d + off_3d * block_size, v4d + off_4d * block_size, sizeof(Float) * block_size); //d -> d memcpy(v3d + off_3d * block_size + hl_sites * block_size, v4d + off_4d * block_size + vol4d * block_size, sizeof(Float) * block_size); //CubarT -> CubarT } }else{ memcpy(v3d + off_3d * block_size, v4d + off_4d * block_size, sizeof(Float) * block_size); } } } // Calculate fermion force on a specific site, also do the // summation over s direction. // // FIXME: need to add a line sum in s direction to support splitting // in s direction. //CK: v1p = v1[x+mu] // fermion vectors appear to be in CANONICAL ordering template<class Float> void bfm_evo<Float>::fforce_site(Float *mom, Float *gauge, Float *v1, Float *v1p, Float *v2, Float *v2p, int mu, Float coef,int gpf1_offset_p) { Float t1[18], t2[18]; if(cps::GJP.Gparity()) printf("flav 0\n"); printf("v1: "); printf("%f %f ... %f",v1[0],v1[1],v1[24*this->Ls-1]); printf("\n"); printf("v2: "); printf("%f %f ... %f",v2[0],v2[1],v2[24*this->Ls-1]); printf("\n"); printf("v1p: "); printf("%f %f ... %f",v1p[0],v1p[1],v1p[24*this->Ls-1]); printf("\n"); printf("v2p: "); printf("%f %f ... 
%f",v2p[0],v2p[1],v2p[24*this->Ls-1]); printf("\n"); printf("gauge: %f %f ...%f\n",gauge[0],gauge[1],gauge[17]); printf("mom: %f %f ...%f\n",mom[0],mom[1],mom[17]); switch(mu) { case 0: bfm_evo_aux::sprojTrXm(t1, v1p, v2, this->Ls, 0, 0); bfm_evo_aux::sprojTrXp(t2, v2p, v1, this->Ls, 0, 0); break; case 1: bfm_evo_aux::sprojTrYm(t1, v1p, v2, this->Ls, 0, 0); bfm_evo_aux::sprojTrYp(t2, v2p, v1, this->Ls, 0, 0); break; case 2: bfm_evo_aux::sprojTrZm(t1, v1p, v2, this->Ls, 0, 0); bfm_evo_aux::sprojTrZp(t2, v2p, v1, this->Ls, 0, 0); break; default: bfm_evo_aux::sprojTrTm(t1, v1p, v2, this->Ls, 0, 0); bfm_evo_aux::sprojTrTp(t2, v2p, v1, this->Ls, 0, 0); } printf("Minus proj contrib: %f %f ... %f\n",t1[0],t1[1],t1[17]); printf("Plus proj contrib: %f %f ... %f\n",t2[0],t2[1],t2[17]); bfm_evo_aux::su3_add(t1, t2); //t1 -> t1 + t2 bfm_evo_aux::mDotMEqual(t2, gauge, t1); //t2 -> gauge * t1 bfm_evo_aux::trless_am(t2, -coef); printf("Traceless AHmat contrib: %f %f ... %f\n",t2[0],t2[1],t2[17]); if(cps::GJP.Gparity1fX()) for(int i=0;i<18;i++) t2[i]*=2.0; //double latt testing, not production code bfm_evo_aux::su3_add(mom, t2); if(cps::GJP.Gparity()){ //add force from second flavour Float t1_f1[18], t2_f1[18]; const int vol4d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; const int f1_off = 24*this->Ls * vol4d; //f1 offset by 5d volume in this ordering scheme v1+=f1_off; v2+=f1_off; v1p+=gpf1_offset_p; v2p+=gpf1_offset_p; //offset for 'plus' site depends on whether the data is stored in the buffer or the on-node vector printf("flav 1\n"); printf("v1: "); printf("%f %f ... %f",v1[0],v1[1],v1[24*this->Ls-1]); printf("\n"); printf("v2: "); printf("%f %f ... %f",v2[0],v2[1],v2[24*this->Ls-1]); printf("\n"); printf("v1p: "); printf("%f %f ... %f",v1p[0],v1p[1],v1p[24*this->Ls-1]); printf("\n"); printf("v2p: "); printf("%f %f ... 
%f",v2p[0],v2p[1],v2p[24*this->Ls-1]); printf("\n"); Float *gauge_f1 = gauge + vol4d*18*4; Float *mom_f1 = mom + vol4d*18*4; printf("gauge: %f %f ...%f\n",gauge_f1[0],gauge_f1[1],gauge_f1[17]); printf("mom: %f %f ...%f\n",mom_f1[0],mom_f1[1],mom_f1[17]); switch(mu) { case 0: bfm_evo_aux::sprojTrXp(t1_f1, v1, v2p, this->Ls, 0, 0); bfm_evo_aux::sprojTrXm(t2_f1, v2, v1p, this->Ls, 0, 0); break; case 1: bfm_evo_aux::sprojTrYp(t1_f1, v1, v2p, this->Ls, 0, 0); bfm_evo_aux::sprojTrYm(t2_f1, v2, v1p, this->Ls, 0, 0); break; case 2: bfm_evo_aux::sprojTrZp(t1_f1, v1, v2p, this->Ls, 0, 0); bfm_evo_aux::sprojTrZm(t2_f1, v2, v1p, this->Ls, 0, 0); break; default: bfm_evo_aux::sprojTrTp(t1_f1, v1, v2p, this->Ls, 0, 0); bfm_evo_aux::sprojTrTm(t2_f1, v2, v1p, this->Ls, 0, 0); } { cps::Matrix a; a.Trans(t1_f1); Float *aa = (Float*) &a[0]; cps::Matrix b; b.Trans(t2_f1); Float *bb = (Float*) &b[0]; printf("Minus proj contrib: %f %f ... %f\n",aa[0],aa[1],aa[17]); printf("Plus proj contrib: %f %f ... %f\n",bb[0],bb[1],bb[17]); } bfm_evo_aux::su3_add(t1_f1, t2_f1); //t1_f1 -> t1_f1 + t2_f1 //set it up to use the f1 gauge field (sign*U*), such that the boundary sign comes free //this will need to be complex conjugated bfm_evo_aux::mStarDotMTransEqual(t2_f1, gauge_f1, t1_f1); // do (U*)* t^T bfm_evo_aux::trless_am(t2_f1, -coef); printf("Traceless AHmat contrib: %f %f ... 
%f\n",t2_f1[0],t2_f1[1],t2_f1[17]); bfm_evo_aux::su3_add(mom, t2_f1); //setup momentum for the second flavour for(int i=1;i<18;i+=2){ t2[i]*=-1; t2_f1[i]*=-1; } //mom[f1] is mom[f0]* bfm_evo_aux::su3_add(mom_f1, t2); bfm_evo_aux::su3_add(mom_f1, t2_f1); } } template<class Float> void bfm_evo<Float>::fforce_internal(Float *mom, Float *gauge, Float *v1, Float *v2, // internal data Float coef, int mu, int me, int nthreads) { int lclx[5] = {this->node_latt[0], this->node_latt[1], this->node_latt[2], this->node_latt[3], this->Ls}; int low[4] = { 0, 0, 0, 0 }; int high[4] = { lclx[0], lclx[1], lclx[2], lclx[3] }; --high[mu]; //exclude the site on the boundary int block_size = 24 * lclx[4]; const int hl[4] = {high[0] - low[0], high[1] - low[1], high[2] - low[2], high[3] - low[3] }; const int hl_sites = hl[0] * hl[1] * hl[2] * hl[3]; const int gparity_vp_off = block_size * lclx[0] * lclx[1] * lclx[2] * lclx[3]; //offset of second flavour (not used when G-parity is off) // note: some of the threads are dedicated to communication. There // must be exactly *nthreads* threads executing this function, the // variable *me* must range from 0 to nthreads - 1, inclusive. 
int thrlen, throff; this->thread_work_partial_nobarrier(hl_sites, me, nthreads, thrlen, throff); for(int i = 0; i < thrlen; ++i) { int x[4]; int tmp = i + throff; x[0] = tmp % hl[0] + low[0]; tmp /= hl[0]; x[1] = tmp % hl[1] + low[1]; tmp /= hl[1]; x[2] = tmp % hl[2] + low[2]; tmp /= hl[2]; x[3] = tmp % hl[3] + low[3]; int off_4d = idx_4d(x, lclx); int gid = mu + 4 * off_4d; int fid = block_size * off_4d; int y[4] = {x[0], x[1], x[2], x[3]}; ++y[mu]; int fidp = block_size * idx_4d(y, lclx); //testing int gx[4] = { x[0] + cps::GJP.XnodeSites()*cps::GJP.XnodeCoor(), x[1] + cps::GJP.YnodeSites()*cps::GJP.YnodeCoor(), x[2] + cps::GJP.ZnodeSites()*cps::GJP.ZnodeCoor(), x[3] + cps::GJP.TnodeSites()*cps::GJP.TnodeCoor() }; if(cps::GJP.Gparity1fX()){ int flav = 0; if( gx[0] >= cps::GJP.XnodeSites()*cps::GJP.Xnodes()/2 ){ gx[0] -= cps::GJP.XnodeSites()*cps::GJP.Xnodes()/2; flav = 1; } printf("1f GP coord (%d %d %d %d) flav %d\n",gx[0],gx[1],gx[2],gx[3],flav); }else if(cps::GJP.Gparity()){ printf("2f GP coord (%d %d %d %d)\n",gx[0],gx[1],gx[2],gx[3]); } //Note fforce_site computes the force on this site from both flavours in the case of G-parity BCs this->fforce_site(mom + 18 * gid, gauge + 18 * gid, v2 + fid, v2 + fidp, v1 + fid, v1 + fidp, mu, coef, gparity_vp_off); } //GPARITY TESTING: COMPARE 1F AND 2F METHODS (NOT USED IN PRODUCTION CODE) if(cps::GJP.Gparity1fX() && me==0){ //use only first thread for this (does not need to be fast as it is only testing) this->thread_barrier(); printf("Patching up 1f G-parity force\n"); //want p_0' = p_0 + delta p_0 + cconj(delta p_1) // p_1' = p_1 + delta p_1 + cconj(delta p_0) //we did p_i' = p_i + 2 * delta p_i //and we know p_1 = cconj(p_0) //so we now do p_0' = 0.5* p_0' + 0.5* cconj(p_1') //so we now do p_1' = 0.5* p_1' + 0.5* cconj(p_0') //to fix this int momsz = 4*18*cps::GJP.VolNodeSites(); Float *buf = (Float *)bfm_alloc(momsz * sizeof(Float) ); for(int ii=0;ii<momsz;ii++) buf[ii] = 0.0; //Communicate \delta p from first half 
onto second half and vice versa Float *data_buf = mom; Float *send_buf = data_buf; Float *recv_buf = buf; if(cps::GJP.Xnodes()>1){ //pass between nodes for(int i=0;i<cps::GJP.Xnodes()/2;i++){ cps::getMinusData((Float *)recv_buf, (Float *)send_buf, momsz , 0); data_buf = recv_buf; recv_buf = send_buf; send_buf = data_buf; } }else{ //shift mom[mu] field by xsites/2 for(long i=0;i<cps::GJP.VolNodeSites();i++){ //i = (x + Lx*(y+Ly*(z+Lz*t) ) ) int x = i % cps::GJP.XnodeSites(); int pos_rem = i/cps::GJP.XnodeSites(); //(y+Ly*(z+Lz*t) int x_from = (x + cps::GJP.XnodeSites()/2) % cps::GJP.XnodeSites(); int i_from = 18*mu + 18*4*(x_from + cps::GJP.XnodeSites()*pos_rem); int i_to = 18*mu + 18*4*i; for(int j=0;j<18;j++) buf[i_to+j] = mom[i_from+j]; } data_buf = buf; } for(int i=0;i<cps::GJP.VolNodeSites();i++){ //do fixup step int mat_off = 18*mu + 18*4*i; for(int j=0;j<18;j++){ if(j%2==0) mom[mat_off+j] = mom[mat_off+j]/2.0 + data_buf[mat_off+j]/2.0; else mom[mat_off+j] = mom[mat_off+j]/2.0 - data_buf[mat_off+j]/2.0; } } bfm_free(buf); } } template<class Float> void bfm_evo<Float>::fforce_surface(Float *mom, Float *gauge, Float *v1, Float *v2, // internal data Float *v1_s, Float *v2_s, // surface data Float coef, int mu) { int lclx[5] = {this->node_latt[0], this->node_latt[1], this->node_latt[2], this->node_latt[3], this->Ls}; int low[4] = { 0, 0, 0, 0 }; int high[4] = { lclx[0], lclx[1], lclx[2], lclx[3] }; low[mu] = lclx[mu] - 1; int block_size = 24 * lclx[4]; const int hl[4] = {high[0] - low[0], high[1] - low[1], high[2] - low[2], high[3] - low[3] }; int hl_sites = hl[0] * hl[1] * hl[2] * hl[3]; int me, thrlen, throff; this->thread_work(hl_sites, me, thrlen, throff); const int gparity_vp_off = block_size * hl_sites; //offset of second flavour (not used when G-parity is off) for(int i = 0; i < thrlen; ++i) { int x[4]; int tmp = i + throff; x[0] = tmp % hl[0] + low[0]; tmp /= hl[0]; x[1] = tmp % hl[1] + low[1]; tmp /= hl[1]; x[2] = tmp % hl[2] + low[2]; tmp /= hl[2]; x[3] 
= tmp % hl[3] + low[3]; int off_4d = idx_4d(x, lclx); int gid = mu + 4 * off_4d; int fid = block_size * off_4d; int fid_s = block_size * idx_4d_surf(x, lclx, mu); this->fforce_site(mom + 18 * gid, gauge + 18 * gid, v2 + fid, v2_s + fid_s, v1 + fid, v1_s + fid_s, mu, coef,gparity_vp_off); } //GPARITY TESTING: COMPARE 1F AND 2F METHODS (NOT USED IN PRODUCTION CODE) if(cps::GJP.Gparity1fX() && me==0){ //use only first thread for this (does not need to be fast as it is only testing) this->thread_barrier(); printf("Patching up 1f G-parity force\n"); //want p_0' = p_0 + delta p_0 + cconj(delta p_1) // p_1' = p_1 + delta p_1 + cconj(delta p_0) //we did p_i' = p_i + 2 * delta p_i //and we know p_1 = cconj(p_0) //so we now do p_0' = 0.5* p_0' + 0.5* cconj(p_1') //so we now do p_1' = 0.5* p_1' + 0.5* cconj(p_0') //to fix this int momsz = 4*18*cps::GJP.VolNodeSites(); Float *buf = (Float *)bfm_alloc(momsz * sizeof(Float) ); for(int ii=0;ii<momsz;ii++) buf[ii] = 0.0; //Communicate \delta p from first half onto second half and vice versa Float *data_buf = mom; Float *send_buf = data_buf; Float *recv_buf = buf; if(cps::GJP.Xnodes()>1){ //pass between nodes for(int i=0;i<cps::GJP.Xnodes()/2;i++){ cps::getMinusData((Float *)recv_buf, (Float *)send_buf, momsz , 0); data_buf = recv_buf; recv_buf = send_buf; send_buf = data_buf; } }else{ //shift mom[mu] field by xsites/2 for(long i=0;i<cps::GJP.VolNodeSites();i++){ //i = (x + Lx*(y+Ly*(z+Lz*t) ) ) int x = i % cps::GJP.XnodeSites(); int pos_rem = i/cps::GJP.XnodeSites(); //(y+Ly*(z+Lz*t) int x_from = (x + cps::GJP.XnodeSites()/2) % cps::GJP.XnodeSites(); int i_from = 18*mu + 18*4*(x_from + cps::GJP.XnodeSites()*pos_rem); int i_to = 18*mu + 18*4*i; for(int j=0;j<18;j++) buf[i_to+j] = mom[i_from+j]; } data_buf = buf; } for(int i=0;i<cps::GJP.VolNodeSites();i++){ //do fixup step int mat_off = 18*mu + 18*4*i; for(int j=0;j<18;j++){ if(j%2==0) mom[mat_off+j] = mom[mat_off+j]/2.0 + data_buf[mat_off+j]/2.0; else mom[mat_off+j] = 
mom[mat_off+j]/2.0 - data_buf[mat_off+j]/2.0; } } bfm_free(buf); } } // compute fermion force for Mobius class fermions: // This is the threaded equivalent of fbfm::EvolveMemFforceBase() in CPS. // // mom += coef * (phiL^\dag e_i(M) \phiR + \phiR^\dag e_i(M^\dag) \phiL) // M = M_oo - M_oe M^{-1}_ee M_eo // // IMPORTANT: at least 5 threads are needed for this function to work // correctly since we want to interleave communication and the // evaluation of internal forces. template<class Float> void bfm_evo<Float>::compute_force(Float *mom, Float *gauge, Fermion_t phiL, Fermion_t phiR, double coef) { int me = this->thread_barrier(); Fermion_t v1[2] = {this->threadedAllocFermion(), this->threadedAllocFermion()}; Fermion_t v2[2] = {this->threadedAllocFermion(), this->threadedAllocFermion()}; this->calcMDForceVecs(v1, v2, phiL, phiR); // compute various sizes int lclx[5] = {this->node_latt[0], this->node_latt[1], this->node_latt[2], this->node_latt[3], this->Ls}; int vol_5d = 24 * lclx[0] * lclx[1] * lclx[2] * lclx[3] * lclx[4]; int surf_size[4]; int surf_size_all = 0; for(int i = 0; i < 4; ++i) { surf_size[i] = vol_5d / lclx[i]; if(cps::GJP.Gparity()) surf_size[i] *=2; //2 flavours surf_size_all += 2 * surf_size[i]; } // calculate offset of surface vectors v1 and v2 int surf_v1[4], surf_v2[4]; surf_v1[0] = 0; surf_v2[0] = surf_size[0]; for(int i = 1; i < 4; ++i) { surf_v1[i] = surf_v1[i-1] + surf_size[i-1] * 2; surf_v2[i] = surf_v1[i] + surf_size[i]; } int fsize = vol_5d; if(cps::GJP.Gparity()) fsize*=2; Float *v1f = this->threadedAllocFloat(fsize); Float *v2f = this->threadedAllocFloat(fsize); Float *sndbuf = this->threadedAllocFloat(surf_size_all); Float *rcvbuf = this->threadedAllocFloat(surf_size_all); this->thread_impexFermion_s(v1f, v1, 0); this->thread_impexFermion_s(v2f, v2, 0); for(int i = 0; i < 4; ++i) { this->copySendFrmData(sndbuf + surf_v1[i], v1f, i, true); this->copySendFrmData(sndbuf + surf_v2[i], v2f, i, true); } if(this->nthread <= 4) { //#define 
DROPOUT_LT5THREADS #ifdef DROPOUT_LT5THREADS if(!me) { printf("compute_force: Oops, at least 5 threads are needed.\n"); } exit(-1); #else //CK: We can do it with less than 5 threads, but less efficiently (so this will work on a cluster/laptop) if(me==0){ //Do comms on single thread for(int dir=0; dir<4; dir++) cps::getPlusData(rcvbuf + surf_v1[dir], sndbuf + surf_v1[dir], surf_size[dir] * 2, dir); } for(int i = 0; i < 4; ++i) { fforce_internal(mom, gauge, v1f, v2f, coef, i, me, this->nthread); //run over however many threads we have } #endif }else{ // Fused comm/internal force. // // The last 4 threads (typically 60-63) are used for // communication. All other threads (typically 0-59) are used to // calculate internal forces. // parallelize comm/internal force calculation if(me >= this->nthread - 4) { int dir = this->nthread - me - 1; cps::getPlusData(rcvbuf + surf_v1[dir], sndbuf + surf_v1[dir], surf_size[dir] * 2, dir); } else { for(int i = 0; i < 4; ++i) { fforce_internal(mom, gauge, v1f, v2f, coef, i, me, this->nthread - 4); } } } this->thread_barrier(); for(int i = 0; i < 4; ++i) { fforce_surface(mom, gauge, v1f, v2f, rcvbuf + surf_v1[i], // v1 surface rcvbuf + surf_v2[i], // v2 surface coef, i); } this->threadedFreeFloat(v1f); this->threadedFreeFloat(v2f); this->threadedFreeFloat(sndbuf); this->threadedFreeFloat(rcvbuf); this->threadedFreeFermion(v1[0]); this->threadedFreeFermion(v1[1]); this->threadedFreeFermion(v2[0]); this->threadedFreeFermion(v2[1]); } // complex version of axpy() // template<class Float> // void bfm_evo<Float>::axpy_c(Fermion_t r, Fermion_t x, Fermion_t y, std::complex<double> a, Fermion_t tmp) // { // this->copy(tmp, x); // this->scale(tmp, std::real(a), std::imag(a)); // this->axpy(r, tmp, y, 1.0); // } // bicg_M: Biconjugate gradient method on preconditioned Dirac // operator (It never converges). // // FIXME: test code only, don't use it unless you know what you are // doing. 
// bicg_M: biconjugate gradient (BiCG) iteration on the preconditioned Dirac
// operator, solving M x = src while simultaneously updating the shadow
// system for Md (the dagger operator). Returns the iteration count.
// NOTE(review): per the header comment above, this is test code and is
// documented as never converging — do not use in production paths.
template<class Float>
int bfm_evo<Float>::bicg_M(Fermion_t sol, Fermion_t src)
{
  int me = this->thread_barrier();

  // Work vectors: r/rd are the residual and shadow residual, p/pd the
  // search directions, mp/mdpd hold M p and Md pd respectively.
  Fermion_t r = this->threadedAllocFermion();
  Fermion_t rd = this->threadedAllocFermion();
  Fermion_t p = this->threadedAllocFermion();
  Fermion_t pd = this->threadedAllocFermion();
  Fermion_t mp = this->threadedAllocFermion();
  Fermion_t mdpd= this->threadedAllocFermion();
  Fermion_t x = sol;                           // solution updated in place
  Fermion_t xd = this->threadedAllocFermion(); // shadow solution
  this->copy(xd, x);
  Fermion_t tv1 = this->threadedAllocFermion();
  Fermion_t tv2 = this->threadedAllocFermion();

  // Stopping threshold is relative: ||r||^2 < ||src||^2 * residual^2.
  const double src_norm = this->norm(src);
  const double stop = src_norm * this->residual * this->residual;

  this->Mprec(x , r , tv1, 0, 0);  // r  <- M x0
  this->Mprec(xd, rd, tv1, 1, 0);  // rd <- Md xd0 (dag=1)
  double rnorm = this->axpy_norm(r , r , src, -1.0); // r0 <- b-M*x0
  double rdnorm = this->axpy_norm(rd, rd, src, -1.0);// r0d <- b-Md*x0

  if ( this->isBoss() && !me ) {
    printf("iter = %5d rsd = %17.10e true rsd = %17.10e\n", 0, rnorm, rnorm);
  }

  this->copy(p, r);
  this->copy(pd, rd);
  std::complex<double> rddr = this->inner(rd, r); // <rd, r>, reused each sweep

  int k = 1;
  for(; k <= this->max_iter; ++k) {
    this->Mprec(p, mp, tv1, 0, 0);    // mp   <- M p
    this->Mprec(pd, mdpd, tv1, 1, 0); // mdpd <- Md pd

    std::complex<double> pddmp = this->inner(pd, mp);
    std::complex<double> alpha = rddr / pddmp; // BiCG step length

    this->axpy_c(x , p , x , alpha, tv1); // x <- x + alpha * p
    this->axpy_c(xd, pd, xd, alpha, tv1); // xd <- xd + alpha * pd
    this->axpy_c(r , mp , r , -alpha, tv1); // r <- r - alpha * Mp
    this->axpy_c(rd, mdpd, rd, -alpha, tv1); // rd <- rd - alpha * Mdpd
    rnorm = this->norm(r);
    rdnorm = this->norm(rd);

    // check stopping condition
    if(rnorm < stop) {
      // compute true residual
      this->Mprec(x, tv2, tv1, 0, 0);
      double true_rsd = this->axpy_norm(tv1, tv2, src, -1.0);
      if(this->isBoss() && !me) {
        printf("bicg_M: converged in %d iterations.\n", k);
        printf("bicg_M: acc_rsd = %9.3e %9.3e true_rsd = %9.3e\n",
               sqrt(rnorm/src_norm),
               sqrt(rdnorm/src_norm),
               sqrt(true_rsd/src_norm));
      }
      break;
    }

    // beta = <rd_{k+1}, r_{k+1}> / <rd_k, r_k>; cache the new inner product.
    std::complex<double> tmp = this->inner(rd, r);
    std::complex<double> beta = tmp / rddr;
    rddr = tmp;
    this->axpy_c(p , p , r , beta, tv1); // p <- r + beta * p
    this->axpy_c(pd, pd, rd, beta, tv1); // pd <- rd + beta * pd

    // ======================================================================
    // compare rsd and true rsd
    this->Mprec(x, tv2, tv1, 0, 0);
    double true_rsd = this->axpy_norm(tv2, tv2, src, -1.0);
    if ( this->isBoss() && !me ) {
      printf("iter = %5d rsd = %9.3e true rsd = %9.3e a = (%9.3e %9.3e) b = (%9.3e %9.3e)\n",
             k, rnorm, true_rsd,
             real(alpha), imag(alpha), real(beta), imag(beta));
    }
    // ======================================================================
  }

  if(k > this->max_iter) {
    if(this->isBoss() && !me) {
      printf("bicg_M: not converged in %d iterations.\n", k);
    }
  }

  // Release all thread-local temporaries (sol/x is caller-owned).
  this->threadedFreeFermion(r);
  this->threadedFreeFermion(rd);
  this->threadedFreeFermion(p);
  this->threadedFreeFermion(pd);
  this->threadedFreeFermion(mp);
  this->threadedFreeFermion(mdpd);
  this->threadedFreeFermion(xd);
  this->threadedFreeFermion(tv1);
  this->threadedFreeFermion(tv2);

  return k;
}

// bicgstab_M: Biconjugate gradient stabilized method on
// preconditioned Dirac operator.
//
// FIXME: test code only, don't use it unless you know what you are
// doing.
template<class Float> int bfm_evo<Float>::bicgstab_M(Fermion_t sol, Fermion_t src) { int me = this->thread_barrier(); Fermion_t r0 = this->threadedAllocFermion(); Fermion_t r = this->threadedAllocFermion(); Fermion_t p = this->threadedAllocFermion(); Fermion_t v = this->threadedAllocFermion(); Fermion_t s = this->threadedAllocFermion(); Fermion_t t = this->threadedAllocFermion(); Fermion_t x = sol; Fermion_t tv1 = this->threadedAllocFermion(); Fermion_t tv2 = this->threadedAllocFermion(); const double src_norm = this->norm(src); const double stop = src_norm * this->residual * this->residual; this->Mprec(x, r0, tv1, 0, 0); double r0n = this->axpy_norm(r0, r0, src, -1.0); // r0 <- b-M*x0, r0^hat = r0 this->copy(r, r0); if ( this->isBoss() && !me ) { printf("iter = %5d rsd = %17.10e true rsd = %17.10e\n", 0, r0n, r0n); } std::complex<double> rho(1, 0); std::complex<double> alpha(1, 0); std::complex<double> omega(1, 0); this->set_zero(v); this->set_zero(p); int k = 1; for(; k <= this->max_iter; ++k) { std::complex<double> rho_k = this->inner(r0, r); std::complex<double> beta = rho_k / rho * alpha / omega; rho = rho_k; this->axpy_c(tv1, v, p, -omega, tv2); this->axpy_c(p, tv1, r, beta, tv2); this->Mprec(p, v, tv1, 0, 0); alpha = rho / this->inner(r0, v); this->axpy_c(s, v, r, -alpha, tv1); this->Mprec(s, t, tv1, 0, 0); omega = this->inner(t, s) / this->norm(t); this->axpy_c(x, p, x, alpha, tv1); this->axpy_c(x, s, x, omega, tv1); this->axpy_c(r, t, s, -omega, tv1); // compute true residual this->Mprec(x, tv2, tv1, 0, 0); double true_rsd = this->axpy_norm(tv1, tv2, src, -1.0); // check stopping condition if(true_rsd < stop) { if(this->isBoss() && !me) { printf("bicgstab_M: converged in %d iterations.\n", k); printf("bicgstab_M: true_rsd = %10.3e\n", sqrt(true_rsd/src_norm)); } break; } // ====================================================================== // debug information if ( this->isBoss() && !me ) { printf("iter = %5d true rsd = %10.3e " "rho = (%10.3e %10.3e) 
alpha = (%10.3e %10.3e) omega = (%10.3e %10.3e)\n", k, true_rsd, real(rho), imag(rho), real(alpha), imag(alpha), real(omega), imag(omega)); } // ====================================================================== } if(k > this->max_iter) { if(this->isBoss() && !me) { printf("bicgstab_M: not converged in %d iterations.\n", k); } } this->threadedFreeFermion(r0); this->threadedFreeFermion(r); this->threadedFreeFermion(p); this->threadedFreeFermion(v); this->threadedFreeFermion(s); this->threadedFreeFermion(t); this->threadedFreeFermion(tv1); this->threadedFreeFermion(tv2); return k; } #if 0 //CK: in BFM, leaving them there! // copied from Jianglei's bfm template<typename Float> double bfm_evo<Float>::CompactMprec(Fermion_t compact_psi, Fermion_t compact_chi, Fermion_t psi, Fermion_t chi, Fermion_t tmp, int dag,int donrm) { this->copy(psi, compact_psi); double result = this->Mprec(psi, chi, tmp, dag, donrm); this->copy(compact_chi, chi); return result; } // copied from Jianglei's bfm template<typename Float> void bfm_evo<Float>::CompactMunprec(Fermion_t compact_psi[2], Fermion_t compact_chi[2], Fermion_t psi[2], Fermion_t chi[2], Fermion_t tmp, int dag) { this->copy(psi[0], compact_psi[0]); this->copy(psi[1], compact_psi[1]); this->Munprec(psi, chi, tmp, dag); this->copy(compact_chi[0], chi[0]); this->copy(compact_chi[1], chi[1]); } #endif template<typename Float> void bfm_evo<Float>::deflate(Fermion_t out, Fermion_t in, const multi1d<Fermion_t [2]> *evec, const multi1d<Float> *eval, int N) { //CK: Why was this code disabled?? I have re-enabled it! 
//printf("void bfm_evo<Float>::deflate temporarily disabled\n"); //exit(-1); if(N == 0 || evec == NULL || eval == NULL) { if(this->isBoss()) { printf("bfm_evo::deflate() must provide eigenvectors.\n"); } exit(-1); } this->axpby(out, in, in, 0., 0.); //this->set_zero(out); for(int i = 0; i < N; ++i) { std::complex<double> dot = this->inner((*evec)[i][1], in); //#ifdef BFM_GPARITY #if 1 this->caxpy(out, (*evec)[i][1], out, dot.real() / double((*eval)[i]), dot.imag() / double((*eval)[i]) ); #else this->zaxpy(out, (*evec)[i][1], out, dot / double((*eval)[i])); #endif } } // GCR, the matrix is preconditioned M. template<class Float> int bfm_evo<Float>::gcr_M(Fermion_t sol, Fermion_t src) { printf("int bfm_evo<Float>::gcr_M temporarily disabled"); exit(-1); #if 0 int me = this->thread_barrier(); Fermion_t r = this->threadedAllocFermion(); Fermion_t gr = this->threadedAllocFermion(); Fermion_t agr = this->threadedAllocFermion(); Fermion_t p = this->threadedAllocFermion(); Fermion_t ap = this->threadedAllocFermion(); Fermion_t x = sol; Fermion_t tv1 = this->threadedAllocFermion(); Fermion_t tv2 = this->threadedAllocFermion(); const double src_norm = this->norm(src); const double stop = src_norm * this->residual * this->residual; this->Mprec(x, r, tv2, 0, 0); double rnorm = this->axpy_norm(r, r, src, -1.0); // r <- b - M x if ( this->isBoss() && !me ) { std::printf("gcr_M: iter = %5d rsd = %10.3e true rsd = %10.3e\n", 0, std::sqrt(rnorm / src_norm), std::sqrt(rnorm / src_norm)); } this->g5r5(gr, r); this->Mprec(gr, agr, tv1, 0, 0); this->copy(p, gr); this->copy(ap, agr); std::complex<double> ragr = this->inner(r, agr); int k = 1; for(; k <= this->max_iter; ++k) { double pdmmp = this->norm(ap); std::complex<double> alpha = ragr / pdmmp; this->zaxpy(x, p, x, alpha); this->zaxpy(r, ap, r, -alpha); rnorm = this->norm(r); if(rnorm < stop) { if(this->isBoss() && !me) { std::printf("gcr_M: converged in %d iterations.\n", k); std::printf("gcr_M: rsd = %10.3e\n", 
std::sqrt(rnorm/src_norm)); } break; } this->g5r5(gr, r); this->Mprec(gr, agr, tv2, 0, 0); std::complex<double> ragrn = this->inner(r, agr); std::complex<double> beta = ragrn / ragr; ragr = ragrn; this->zaxpy(p, p, gr, beta); this->zaxpy(ap, ap, agr, beta); // ====================================================================== // Computing true residual and other information, the // following can be removed without any effect on convergence. this->Mprec(x, tv1, tv2, 0, 0); double true_rsd = this->axpy_norm(tv1, tv1, src, -1.0); if ( this->isBoss() && !me ) { std::printf("gcr_M: iter = %5d rsd = %10.3e true_rsd = %10.3e\n", k, std::sqrt(rnorm / src_norm), std::sqrt(true_rsd / src_norm)); } // ====================================================================== } if(k > this->max_iter) { if(this->isBoss() && !me) { std::printf("gcr_M: not converged in %d iterations.\n", k); } } this->Mprec(x, tv1, tv2, 0, 0); double true_rsd = this->axpy_norm(tv1, tv1, src, -1.0); if(this->isBoss() && !me) { std::printf("gcr_M: true_rsd = %10.3e\n", std::sqrt(true_rsd/src_norm)); } this->threadedFreeFermion(r); this->threadedFreeFermion(gr); this->threadedFreeFermion(agr); this->threadedFreeFermion(p); this->threadedFreeFermion(ap); this->threadedFreeFermion(tv1); this->threadedFreeFermion(tv2); return k; #endif } // GMRES(m), we restart after m iterations. 
template<class Float> int bfm_evo<Float>::gmres_M(Fermion_t sol, Fermion_t src, const int m) { printf("int bfm_evo<Float>::gmres_M temporarily disabled\n"); exit(-1); #if 0 using namespace std; typedef complex<double> cmplx; int me = this->thread_barrier(); Fermion_t r = this->threadedAllocFermion(); Fermion_t w = this->threadedAllocFermion(); Fermion_t tv1 = this->threadedAllocFermion(); // the history of search directions vector<Fermion_t> v(m + 1, NULL); for(int i = 0; i <= m; ++i) { v[i] = this->threadedAllocFermion(); } vector<cmplx> H((m + 1) * m, 0); vector<cmplx> R((m + 1) * m, 0); vector<cmplx> B(m, 0); vector<cmplx> C(m, 0); vector<cmplx> S(m, 0); vector<cmplx> Y(m, 0); const double len = sqrt(this->norm(src)); const double stop = len * this->residual; this->Mprec(sol, r, tv1, 0, 0); double rsq = this->axpy_norm(r, r, src, -1.0); // r <- b - M x int j = 0; for(; j < this->max_iter / m; ++j) { double beta = sqrt(rsq); this->axpy(v[0], r, r, 1/beta - 1); // v[0] <- r / beta B.assign(m, 0); B[0] = beta; int nr = m; double rho = len; for(int i = 0; i < m; ++i) { this->Mprec(v[i], w, tv1, 0, 0); // Arnoldi iteration for(int k = 0; k <= i; ++k) { H[k*m+i] = this->inner(v[k], w); this->zaxpy(w, v[k], w, -H[k*m+i]); } double w2 = sqrt(this->norm(w)); H[(i+1)*m+i] = w2; this->axpy(v[i+1], w, w, 1/w2 - 1); R[0*m+i] = H[0*m+i]; // Givens transformation for(int k = 1; k <= i; ++k) { cmplx gamma = C[k-1] * R[(k-1)*m+i] + conj(S[k-1]) * H[k*m+i]; R[k*m+i] = -S[k-1] * R[(k-1)*m+i] + C[k-1] * H[k*m+i]; R[(k-1)*m+i] = gamma; } double rii = norm(R[i*m+i]); double hii = norm(H[(i+1)*m+i]); double delta = sqrt(rii + hii); cmplx mu, tau; if(rii < hii) { mu = R[i*m+i] / H[(i+1)*m+i]; tau = conj(mu) / abs(mu); } else { mu = H[(i+1)*m+i] / R[i*m+i]; tau = mu / abs(mu); } C[i] = sqrt(rii) / delta; S[i] = sqrt(hii) * tau / delta; R[i*m+i] = C[i] * R[i*m+i] + conj(S[i]) * H[(i+1)*m+i]; B[i+1] = -S[i] * B[i]; B[i] *= C[i]; rho = abs(B[i+1]); if(this->isBoss() && !me) { 
std::printf("gmres: (j i) = %4d %4d rsd = %10.3e\n", j, i, rho / len); } if(rho < stop) { nr = i; break; } } for(int k = nr - 1; k >= 0; --k) { Y[k] = B[k]; for(int i = k + 1; i < nr; ++i) { Y[k] -= R[k*m+i] * Y[i]; } Y[k] /= R[k*m+k]; this->zaxpy(sol, v[k], sol, Y[k]); } this->Mprec(sol, r, tv1, 0, 0); rsq = this->axpy_norm(r, r, src, -1.0); if(rho < stop) break; } if(j >= this->max_iter / m) { if(this->isBoss() && !me) { std::printf("gmres: not converged in %d iterations.\n", j); } } if(this->isBoss() && !me) { std::printf("gmres: true_rsd = %10.3e\n", std::sqrt(rsq) / len); } this->threadedFreeFermion(r); this->threadedFreeFermion(w); this->threadedFreeFermion(tv1); for(int i = 0; i <= m; ++i) { this->threadedFreeFermion(v[i]); } return j; #endif } #endif
main.c
/* baz: set A[i] = i + sum(T[0..M-1]) for i in [0, N). The per-element
 * accumulation loop is kept as-is so the OpenMP worksharing shape of
 * the loop nest is unchanged. */
void baz(int M, int *restrict T, int N, int *restrict A) {
#pragma omp parallel default(shared)
  {
#pragma omp for
    for (int i = 0; i < N; ++i) {
      A[i] = i;
      for (int j = 0; j < M; ++j)
        A[i] += T[j];
    }
  }
}

/* bar: thin forwarding wrapper around baz. */
void bar(int M, int *restrict T, int N, int *restrict A) {
  baz(M, T, N, A);
}

/* foo: build the table T = {0,1,2,3} and apply bar over A[0..N-1]. */
void foo(int N, int *A) {
  int tableLen = 4;
  int T[4];
  for (int i = 0; i < tableLen; ++i)
    T[i] = i;
#pragma spf region
  {
    bar(tableLen, T, N, A);
  }
}
spmv_int.c
////Example of sparse matrix-vector multiply, using CSR (compressed sparse row format).
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Add timing support
#include <sys/timeb.h>
#include <time.h>

/* read_timer: wall-clock time in seconds.
 * Uses C11 timespec_get(); the former ftime()/struct timeb interface is
 * obsolete (removed in POSIX.1-2008) and limited to millisecond
 * resolution. */
double read_timer() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec + (double) ts.tv_nsec / 1.0e9;
}

//#define DEFAULT_DIMSIZE 256

/* print_array: debug helper — print A as an n x m matrix labelled with
 * "title"/"name". */
void print_array(char *title, char *name, int *A, int n, int m) {
    printf("%s:\n", title);
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%d][%d]:%d ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 implicit none
 ************************************************************
 * Checks error between numerical and exact solution
 *
 ************************************************************/

/* main: build the 5-point stencil matrix of an n x n grid in CSR form,
 * multiply by x = (1,...,1), time the SpMV, and verify no y entry is
 * negative (interior rows sum to exactly 0, boundary rows are positive).
 *
 * Fixes vs. the original:
 *  - ia is a CSR row-pointer array and needs nrows+1 entries; the
 *    original allocated only nrows, so the final ia[row] = nnz wrote
 *    one element past the end of the buffer.
 *  - allocation results are now checked before use.
 *  - flops is counted in a long long: with the default n=10240 the int
 *    count (~1.05e9) is already close to INT_MAX and overflows for
 *    slightly larger grids.
 *  - removed unused locals ts, t, rate; integer arrays are filled with
 *    integer literals instead of double literals.
 */
int main(int argc, char *argv[]) {
    int *ia, *ja;
    int *a, *x, *y;
    int row, i, j, idx, n, nnzMax, nnz, nrows;

    n = 10240;
    //n = 24;
    if (argc > 1)
        n = atoi(argv[1]);
    nrows = n * n;
    nnzMax = nrows * 5;

    ia = malloc((nrows + 1) * sizeof(int)); /* +1: CSR row pointers have nrows+1 entries */
    ja = malloc(nnzMax * sizeof(int));
    a = malloc(nnzMax * sizeof(int));
    /* Allocate the source and result vectors */
    x = malloc(nrows * sizeof(int));
    y = malloc(nrows * sizeof(int));
    if (ia == NULL || ja == NULL || a == NULL || x == NULL || y == NULL) {
        fprintf(stderr, "failed to allocate CSR matrix/vectors for n=%d\n", n);
        return 1;
    }

    /* Assemble the matrix row by row: -1 on the four neighbour
     * diagonals (when inside the grid), 4 on the main diagonal. */
    row = 0;
    nnz = 0;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            ia[row] = nnz;
            if (i > 0) { ja[nnz] = row - n; a[nnz] = -1; nnz++; }
            if (j > 0) { ja[nnz] = row - 1; a[nnz] = -1; nnz++; }
            ja[nnz] = row; a[nnz] = 4; nnz++;
            if (j < n - 1) { ja[nnz] = row + 1; a[nnz] = -1; nnz++; }
            if (i < n - 1) { ja[nnz] = row + n; a[nnz] = -1; nnz++; }
            row++;
        }
    }
    ia[row] = nnz; /* row == nrows here — valid only with the nrows+1 allocation */

    /* Create the source (x) vector */
    for (i = 0; i < nrows; i++)
        x[i] = 1;

    double elapsed = read_timer();
    long long flops = 0;
    for (row = 0; row < nrows; row++) {
        int sum = 0;
#pragma omp simd reduction(+:sum,flops) simdlen(8)
        for (idx = ia[row]; idx < ia[row + 1]; idx++) {
            sum += a[idx] * x[ja[idx]];
            flops += 2; /* one multiply + one add per stored entry */
        }
        y[row] = sum;
    }
    elapsed = read_timer() - elapsed;

    double gflops = flops / (1.0e9 * elapsed);
    printf("seq elasped time(s): %.4f\n", elapsed);
    printf("GFlops: %.4f\n", gflops);

    /* Consistency check: A * (1,...,1) has no negative entries. */
    for (row = 0; row < nrows; row++) {
        if (y[row] < 0) {
            fprintf(stderr, "y[%d]=%d, fails consistency test\n", row, y[row]);
        }
    }

    free(ia);
    free(ja);
    free(a);
    free(x);
    free(y);
    return 0;
}
GraphBLAS.h
//------------------------------------------------------------------------------ // GraphBLAS.h: definitions for the GraphBLAS package //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS is a complete implementation of the GraphBLAS // standard, which defines a set of sparse matrix operations on an extended // algebra of semirings, using an almost unlimited variety of operators and // types. When applied to sparse adjacency matrices, these algebraic // operations are equivalent to computations on graphs. GraphBLAS provides a // powerful and expressive framework creating graph algorithms based on the // elegant mathematics of sparse matrix operations on a semiring. // This GraphBLAS.h file contains GraphBLAS definitions for user applications // to #include. A few functions and variables with the prefix GB_ need to be // defined in this file and are thus technically visible to the user, but they // must not be accessed in user code. They are here only so that the ANSI C11 // _Generic feature can be used in the user-accessible polymorphic functions, // or to implement a fast GxB_Iterator using macros. // This implementation conforms to the GraphBLAS API Specification and also // includes functions and features that are extensions to the spec, which are // given names of the form GxB_* for functions, built-in objects, and macros, // so it is clear which are in the spec and which are extensions. Extensions // with the name GxB_* are user-accessible in SuiteSparse:GraphBLAS but cannot // be guaranteed to appear in all GraphBLAS implementations. // Regarding "historical" functions and symbols: when a GxB_* function or // symbol is added to the C API Specification, the new GrB_* name should be // used instead. 
The old GxB_* name will be kept for historical reasons, // documented here and in working order; it might no longer be mentioned in the // user guide. Historical functions and symbols would only be removed in the // rare case that they cause a serious conflict with future methods. #ifndef GRAPHBLAS_H #define GRAPHBLAS_H //============================================================================== // include files required by GraphBLAS //============================================================================== #include <stdio.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdbool.h> #include <stdint.h> #include <inttypes.h> #include <stddef.h> #include <limits.h> #include <math.h> #include <stdarg.h> //============================================================================== // renaming for use in R2021a or later //============================================================================== #define GB_CAT2(x,y) x ## y #define GB_EVAL2(x,y) GB_CAT2 (x,y) #ifdef GBRENAME // All symbols must be renamed for the @GrB interface when using // R2021a and following, since those versions include an earlier // version of SuiteSparse:GraphBLAS. #define GB(x) GB_EVAL2 (GM_, x) #define GRB(x) GB_EVAL2 (GrM_, x) #define GXB(x) GB_EVAL2 (GxM_, x) #define GrB GrM #define GxB GxM #include "GB_rename.h" #else // Use the standard GraphBLAS prefix. 
#define GB(x) GB_EVAL2 (GB_, x) #define GRB(x) GB_EVAL2 (GrB_, x) #define GXB(x) GB_EVAL2 (GxB_, x) #endif //============================================================================== // compiler variations //============================================================================== // Exporting/importing symbols for Microsoft Visual Studio #if ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) ) #ifdef GB_LIBRARY // compiling SuiteSparse:GraphBLAS itself, exporting symbols to user apps #define GB_PUBLIC extern __declspec ( dllexport ) #else // compiling the user application, importing symbols from SuiteSparse:GraphBLAS #define GB_PUBLIC extern __declspec ( dllimport ) #endif #else // for other compilers #define GB_PUBLIC extern #endif // GraphBLAS requires an ANSI C11 compiler for its polymorphic functions (using // the _Generic keyword), but it can be used in an C90 compiler if those // functions are disabled. // With ANSI C11 and later, _Generic keyword and polymorphic functions can be // used. Earlier versions of the language do not have this feature. 
#ifdef __STDC_VERSION__ // ANSI C17: 201710L // ANSI C11: 201112L // ANSI C99: 199901L // ANSI C95: 199409L #define GxB_STDC_VERSION __STDC_VERSION__ #else // assume ANSI C90 / C89 #define GxB_STDC_VERSION 199001L #endif //------------------------------------------------------------------------------ // definitions for complex types, and restrict keyword //------------------------------------------------------------------------------ #undef GB_restrict // See: // https://www.drdobbs.com/complex-arithmetic-in-the-intersection-o/184401628# #if defined ( __cplusplus ) extern "C++" { // C++ complex types #include <cmath> #include <complex> #undef I typedef std::complex<float> GxB_FC32_t ; typedef std::complex<double> GxB_FC64_t ; } #define GxB_CMPLXF(r,i) GxB_FC32_t(r,i) #define GxB_CMPLX(r,i) GxB_FC64_t(r,i) #define GB_restrict #elif ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) ) // Microsoft Windows complex types #include <complex.h> #undef I typedef _Fcomplex GxB_FC32_t ; typedef _Dcomplex GxB_FC64_t ; #define GxB_CMPLXF(r,i) (_FCbuild (r,i)) #define GxB_CMPLX(r,i) ( _Cbuild (r,i)) #define GB_restrict __restrict #else // ANSI C11 complex types #include <complex.h> #undef I typedef float complex GxB_FC32_t ; typedef double complex GxB_FC64_t ; #ifndef CMPLX // gcc 6.2 on the the Mac doesn't #define CMPLX #define GxB_CMPLX(r,i) \ ((GxB_FC64_t)((double)(r)) + (GxB_FC64_t)((double)(i) * _Complex_I)) #else // use the ANSI C11 CMPLX macro #define GxB_CMPLX(r,i) CMPLX (r,i) #endif #ifndef CMPLXF // gcc 6.2 on the the Mac doesn't #define CMPLXF #define GxB_CMPLXF(r,i) \ ((GxB_FC32_t)((float)(r)) + (GxB_FC32_t)((float)(i) * _Complex_I)) #else // use the ANSI C11 CMPLXF macro #define GxB_CMPLXF(r,i) CMPLXF (r,i) #endif // restrict keyword #if defined ( __NVCC__ ) // NVIDIA nvcc #define GB_restrict __restrict__ #elif GxB_STDC_VERSION >= 199901L // ANSI C99 or later #define GB_restrict restrict #else // ANSI C95 and earlier: no restrict keyword #define 
GB_restrict #endif #endif //============================================================================== // version control //============================================================================== // There are two version numbers that user codes can check against with // compile-time #if tests: the version of this GraphBLAS implementation, // and the version of the GraphBLAS specification it conforms to. User code // can use tests like this: // // #if GxB_SPEC_VERSION >= GxB_VERSION (2,0,3) // ... use features in GraphBLAS specification 2.0.3 ... // #else // ... only use features in early specifications // #endif // // #if GxB_IMPLEMENTATION > GxB_VERSION (1,4,0) // ... use features from version 1.4.0 of a GraphBLAS package // #endif // X_GRAPHBLAS: names this particular implementation: #define GxB_SUITESPARSE_GRAPHBLAS // GxB_VERSION: a single integer for comparing spec and version levels #define GxB_VERSION(major,minor,sub) \ (((major)*1000ULL + (minor))*1000ULL + (sub)) // The version of this implementation, and the GraphBLAS API version: #define GxB_IMPLEMENTATION_NAME "SuiteSparse:GraphBLAS" #define GxB_IMPLEMENTATION_DATE "Apr 8, 2022" #define GxB_IMPLEMENTATION_MAJOR 7 #define GxB_IMPLEMENTATION_MINOR 0 #define GxB_IMPLEMENTATION_SUB 3 #define GxB_SPEC_DATE "Nov 15, 2021" #define GxB_SPEC_MAJOR 2 #define GxB_SPEC_MINOR 0 #define GxB_SPEC_SUB 0 // compile-time access to the C API Version number of this library. #define GRB_VERSION GxB_SPEC_MAJOR #define GRB_SUBVERSION GxB_SPEC_MINOR #define GxB_IMPLEMENTATION \ GxB_VERSION (GxB_IMPLEMENTATION_MAJOR, \ GxB_IMPLEMENTATION_MINOR, \ GxB_IMPLEMENTATION_SUB) // The 'about' string the describes this particular implementation of GraphBLAS: #define GxB_IMPLEMENTATION_ABOUT \ "SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \ "\nhttp://suitesparse.com Dept of Computer Sci. 
& Eng, Texas A&M University.\n" // The GraphBLAS license for this particular implementation of GraphBLAS: #define GxB_IMPLEMENTATION_LICENSE \ "SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \ "\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\n"\ "not use SuiteSparse:GraphBLAS except in compliance with the License. You\n" \ "may obtain a copy of the License at\n\n" \ " http://www.apache.org/licenses/LICENSE-2.0\n\n" \ "Unless required by applicable law or agreed to in writing, software\n" \ "distributed under the License is distributed on an \"AS IS\" BASIS,\n" \ "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" \ "See the License for the specific language governing permissions and\n" \ "limitations under the License.\n" //------------------------------------------------------------------------------ // GraphBLAS C API version //------------------------------------------------------------------------------ #define GxB_SPEC_VERSION GxB_VERSION(GxB_SPEC_MAJOR,GxB_SPEC_MINOR,GxB_SPEC_SUB) // The 'spec' string describes the GraphBLAS spec: #define GxB_SPEC_ABOUT \ "GraphBLAS C API, by Aydin Buluc, Timothy Mattson, Scott McMillan,\n" \ "Jose' Moreira, Carl Yang, and Benjamin Brock. Based on 'GraphBLAS\n" \ "Mathematics by Jeremy Kepner. See also 'Graph Algorithms in the Language\n" \ "of Linear Algebra,' edited by J. Kepner and J. Gilbert, SIAM, 2011.\n" //============================================================================== // GrB_Index: the GraphBLAS integer //============================================================================== // GrB_Index: row or column index, or matrix dimension. This typedef is used // for row and column indices, or matrix and vector dimensions. typedef uint64_t GrB_Index ; // GrB_INDEX_MAX is the largest permissible index value. The largest valid // matrix or vector dimension is GrB_INDEX_MAX+1, or 2^60 in SuiteSparse:GrB. 
#define GrB_INDEX_MAX ((GrB_Index) (1ULL << 60) - 1)

// GxB_INDEX_MAX is historical; use GrB_INDEX_MAX+1 instead.  It differs by one
// from GrB_INDEX_MAX, since it defined the largest valid matrix or vector
// dimension.

#define GxB_INDEX_MAX ((GrB_Index) (1ULL << 60))

//==============================================================================
// GraphBLAS error and informational codes
//==============================================================================

// All GraphBLAS functions return a code that indicates if it was successful
// or not.  If more information is required, the GrB_error function can be
// called, which returns a string that provides more information on the last
// return value from GraphBLAS.

// The v1.3 C API did not specify the enum values, but they appear in v2.0.
// Changing them will require SuiteSparse:GraphBLAS to bump to v6.x.

// Error codes GrB_NOT_IMPLEMENTED and GrB_EMPTY_OBJECT are new to v2.0.

// Convention visible below: informational codes are positive (or zero for
// success), API errors are small negative values, and execution errors are
// in the -101.. range.
typedef enum
{

    GrB_SUCCESS = 0,            // all is well

    //--------------------------------------------------------------------------
    // informational codes, not an error:
    //--------------------------------------------------------------------------

    GrB_NO_VALUE = 1,           // A(i,j) requested but not there
    GxB_EXHAUSTED = 2,          // iterator is exhausted

    //--------------------------------------------------------------------------
    // errors:
    //--------------------------------------------------------------------------

    GrB_UNINITIALIZED_OBJECT = -1,  // object has not been initialized
    GrB_NULL_POINTER = -2,          // input pointer is NULL
    GrB_INVALID_VALUE = -3,         // generic error; some value is bad
    GrB_INVALID_INDEX = -4,         // row or column index is out of bounds
    GrB_DOMAIN_MISMATCH = -5,       // object domains are not compatible
    GrB_DIMENSION_MISMATCH = -6,    // matrix dimensions do not match
    GrB_OUTPUT_NOT_EMPTY = -7,      // output matrix already has values
    GrB_NOT_IMPLEMENTED = -8,       // method not implemented
    GrB_PANIC = -101,               // unknown error
    GrB_OUT_OF_MEMORY = -102,       // out of memory
    GrB_INSUFFICIENT_SPACE = -103,  // output array not large enough
    GrB_INVALID_OBJECT = -104,      // object is corrupted
    GrB_INDEX_OUT_OF_BOUNDS = -105, // row or col index out of bounds
    GrB_EMPTY_OBJECT = -106         // an object does not contain a value
}
GrB_Info ;

//==============================================================================
// GrB_init / GrB_finalize
//==============================================================================

// GrB_init must be called before any other GraphBLAS operation.  GrB_finalize
// must be called as the last GraphBLAS operation.

// GrB_init defines the mode that GraphBLAS will use:  blocking or
// non-blocking.  With blocking mode, all operations finish before returning to
// the user application.  With non-blocking mode, operations can be left
// pending, and are computed only when needed.

// The extension GxB_init does the work of GrB_init, but it also defines the
// memory management functions that SuiteSparse:GraphBLAS will use internally.

typedef enum
{
    GrB_NONBLOCKING = 0,    // methods may return with pending computations
    GrB_BLOCKING = 1        // no computations are ever left pending
}
GrB_Mode ;

GB_PUBLIC
GrB_Info GrB_init           // start up GraphBLAS
(
    GrB_Mode mode           // blocking or non-blocking mode
) ;

GB_PUBLIC
GrB_Info GxB_init           // start up GraphBLAS and also define malloc, etc
(
    GrB_Mode mode,          // blocking or non-blocking mode
    // pointers to memory management functions; all four must be supplied
    // together since they replace the library's internal allocator as a set
    void * (* user_malloc_function  ) (size_t),
    void * (* user_calloc_function  ) (size_t, size_t),
    void * (* user_realloc_function ) (void *, size_t),
    void   (* user_free_function    ) (void *)
) ;

GB_PUBLIC
GrB_Info GrB_finalize (void) ;     // finish GraphBLAS

//==============================================================================
// GrB_getVersion: GraphBLAS C API version
//==============================================================================

// GrB_getVersion provides a runtime access of the C API Version.
GB_PUBLIC GrB_Info GrB_getVersion // runtime access to C API version number ( unsigned int *version, // returns GRB_VERSION unsigned int *subversion // returns GRB_SUBVERSION ) ; //============================================================================== // GrB_Descriptor: the GraphBLAS descriptor //============================================================================== // The GrB_Descriptor is used to modify the behavior of GraphBLAS operations. // // GrB_OUTP: can be GxB_DEFAULT or GrB_REPLACE. If GrB_REPLACE, then C is // cleared after taking part in the accum operation but before the mask. // In other words, C<Mask> = accum (C,T) is split into Z = accum(C,T) ; // C=0 ; C<Mask> = Z. // // GrB_MASK: can be GxB_DEFAULT, GrB_COMP, GrB_STRUCTURE, or set to both // GrB_COMP and GrB_STRUCTURE. If GxB_DEFAULT, the mask is used // normally, where Mask(i,j)=1 means C(i,j) can be modified by C<Mask>=Z, // and Mask(i,j)=0 means it cannot be modified even if Z(i,j) is has been // computed and differs from C(i,j). If GrB_COMP, this is the same as // taking the logical complement of the Mask. If GrB_STRUCTURE is set, // the value of the mask is not considered, just its pattern. The // GrB_COMP and GrB_STRUCTURE settings can be combined. // // GrB_INP0: can be GxB_DEFAULT or GrB_TRAN. If GxB_DEFAULT, the first input // is used as-is. If GrB_TRAN, it is transposed. Only matrices are // transposed this way. Vectors are never transposed via the // GrB_Descriptor. // // GrB_INP1: the same as GrB_INP0 but for the second input // // GxB_NTHREADS: the maximum number of threads to use in the current method. // If <= GxB_DEFAULT (which is zero), then the number of threads is // determined automatically. This is the default value. // // GxB_CHUNK: an integer parameter that determines the number of threads to use // for a small problem. If w is the work to be performed, and chunk is // the value of this parameter, then the # of threads is limited to floor // (w/chunk). 
The default chunk is currently 64K, but this may change in // the future. If chunk is set to <= GxB_DEFAULT (that is, zero), the // default is used. // // GxB_AxB_METHOD: this is a hint to SuiteSparse:GraphBLAS on which algorithm // it should use to compute C=A*B, in GrB_mxm, GrB_mxv, and GrB_vxm. // SuiteSparse:GraphBLAS has four different heuristics, and the default // method (GxB_DEFAULT) selects between them automatically. The complete // rule is in the User Guide. The brief discussion here assumes all // matrices are stored by column. All methods compute the same result, // except that floating-point roundoff may differ when working on // floating-point data types. // // GxB_AxB_SAXPY: C(:,j)=A*B(:,j) is computed using a mix of Gustavson // and Hash methods. Each task in the parallel computation makes its // own decision between these two methods, via a heuristic. // // GxB_AxB_GUSTAVSON: This is the same as GxB_AxB_SAXPY, except that // every task uses Gustavon's method, computing C(:,j)=A*B(:,j) via a // gather/scatter workspace of size equal to the number of rows of A. // Very good general-purpose method, but sometimes the workspace can // be too large when many threads are used. // // GxB_AxB_HASH: This is the same as GxB_AxB_SAXPY, except that every // task uses the Hash method. It is very good for hypersparse // matrices and uses very little workspace, and so it scales well to // many threads. // // GxB_AxB_DOT: computes C(i,j) = A(:,i)'*B(:,j), for each entry C(i,j). // A very specialized method that works well only if the mask is // present, very sparse, and not complemented, or when C is a dense // vector or matrix, or when C is small. // // GxB_SORT: GrB_mxm and other methods may return a matrix in a 'jumbled' // state, with indices out of order. The sort is left pending. Some // methods can tolerate jumbled matrices on input, so this can be faster. // However, in some cases, it can be faster for GrB_mxm to sort its output // as it is computed. 
With GxB_SORT set to GxB_DEFAULT, the sort is left // pending. With GxB_SORT set to a nonzero value, GrB_mxm typically sorts // the resulting matrix C (but not always; this is just a hint). If // GrB_init is called with GrB_BLOCKING mode, the sort will always be // done, and this setting has no effect. // // GxB_COMPRESSION: compression method for GxB_Matrix_serialize and // GxB_Vector_serialize. The default is LZ4. // // GxB_IMPORT: GxB_FAST_IMPORT (faster, for trusted input data) or // GxB_SECURE_IMPORT (slower, for untrusted input data), for the // GxB*_pack* methods. // The following are enumerated values in both the GrB_Desc_Field and the // GxB_Option_Field for global options. They are defined with the same integer // value for both enums, so the user can use them for both. #define GxB_NTHREADS 5 #define GxB_CHUNK 7 // GPU control (DRAFT: in progress, do not use) #define GxB_GPU_CONTROL 21 #define GxB_GPU_CHUNK 22 typedef enum { GrB_OUTP = 0, // descriptor for output of a method GrB_MASK = 1, // descriptor for the mask input of a method GrB_INP0 = 2, // descriptor for the first input of a method GrB_INP1 = 3, // descriptor for the second input of a method GxB_DESCRIPTOR_NTHREADS = GxB_NTHREADS, // max number of threads to use. // If <= GxB_DEFAULT, then GraphBLAS selects the number // of threads automatically. GxB_DESCRIPTOR_CHUNK = GxB_CHUNK, // chunk size for small problems. // If <= GxB_DEFAULT, then the default is used. 
// GPU control (DRAFT: in progress, do not use) GxB_DESCRIPTOR_GPU_CONTROL = GxB_GPU_CONTROL, GxB_DESCRIPTOR_GPU_CHUNK = GxB_GPU_CHUNK, GxB_AxB_METHOD = 1000, // descriptor for selecting C=A*B algorithm GxB_SORT = 35, // control sort in GrB_mxm GxB_COMPRESSION = 36, // select compression for serialize GxB_IMPORT = 37, // secure vs fast import } GrB_Desc_Field ; typedef enum { // for all GrB_Descriptor fields: GxB_DEFAULT = 0, // default behavior of the method // for GrB_OUTP only: GrB_REPLACE = 1, // clear the output before assigning new values to it // for GrB_MASK only: GrB_COMP = 2, // use the structural complement of the input GrB_STRUCTURE = 4, // use the only pattern of the mask, not its values // for GrB_INP0 and GrB_INP1 only: GrB_TRAN = 3, // use the transpose of the input // for GxB_GPU_CONTROL only (DRAFT: in progress, do not use) GxB_GPU_ALWAYS = 2001, GxB_GPU_NEVER = 2002, // for GxB_AxB_METHOD only: GxB_AxB_GUSTAVSON = 1001, // gather-scatter saxpy method GxB_AxB_DOT = 1003, // dot product GxB_AxB_HASH = 1004, // hash-based saxpy method GxB_AxB_SAXPY = 1005, // saxpy method (any kind) // for GxB_IMPORT only: GxB_SECURE_IMPORT = 502 // GxB*_pack* methods trust their input data } GrB_Desc_Value ; // default for GxB pack is to trust the input data #define GxB_FAST_IMPORT GxB_DEFAULT typedef struct GB_Descriptor_opaque *GrB_Descriptor ; GB_PUBLIC GrB_Info GrB_Descriptor_new // create a new descriptor ( GrB_Descriptor *descriptor // handle of descriptor to create ) ; GB_PUBLIC GrB_Info GrB_Descriptor_set // set a parameter in a descriptor ( GrB_Descriptor desc, // descriptor to modify GrB_Desc_Field field, // parameter to change GrB_Desc_Value val // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Descriptor_get // get a parameter from a descriptor ( GrB_Desc_Value *val, // value of the parameter GrB_Descriptor desc, // descriptor to query; NULL means defaults GrB_Desc_Field field // parameter to query ) ; GB_PUBLIC GrB_Info GxB_Desc_set // set a 
parameter in a descriptor ( GrB_Descriptor desc, // descriptor to modify GrB_Desc_Field field, // parameter to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Desc_get // get a parameter from a descriptor ( GrB_Descriptor desc, // descriptor to query; NULL means defaults GrB_Desc_Field field, // parameter to query ... // value of the parameter ) ; GB_PUBLIC GrB_Info GrB_Descriptor_free // free a descriptor ( GrB_Descriptor *descriptor // handle of descriptor to free ) ; // Predefined descriptors and their values: GB_PUBLIC GrB_Descriptor // OUTP MASK MASK INP0 INP1 // structural complement // =========== ============== ========== ======== ======== // GrB_NULL // - - - - - GrB_DESC_T1 , // - - - - GrB_TRAN GrB_DESC_T0 , // - - - GrB_TRAN - GrB_DESC_T0T1 , // - - - GrB_TRAN GrB_TRAN GrB_DESC_C , // - - GrB_COMP - - GrB_DESC_CT1 , // - - GrB_COMP - GrB_TRAN GrB_DESC_CT0 , // - - GrB_COMP GrB_TRAN - GrB_DESC_CT0T1 , // - - GrB_COMP GrB_TRAN GrB_TRAN GrB_DESC_S , // - GrB_STRUCTURE - - - GrB_DESC_ST1 , // - GrB_STRUCTURE - - GrB_TRAN GrB_DESC_ST0 , // - GrB_STRUCTURE - GrB_TRAN - GrB_DESC_ST0T1 , // - GrB_STRUCTURE - GrB_TRAN GrB_TRAN GrB_DESC_SC , // - GrB_STRUCTURE GrB_COMP - - GrB_DESC_SCT1 , // - GrB_STRUCTURE GrB_COMP - GrB_TRAN GrB_DESC_SCT0 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN - GrB_DESC_SCT0T1 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN GrB_DESC_R , // GrB_REPLACE - - - - GrB_DESC_RT1 , // GrB_REPLACE - - - GrB_TRAN GrB_DESC_RT0 , // GrB_REPLACE - - GrB_TRAN - GrB_DESC_RT0T1 , // GrB_REPLACE - - GrB_TRAN GrB_TRAN GrB_DESC_RC , // GrB_REPLACE - GrB_COMP - - GrB_DESC_RCT1 , // GrB_REPLACE - GrB_COMP - GrB_TRAN GrB_DESC_RCT0 , // GrB_REPLACE - GrB_COMP GrB_TRAN - GrB_DESC_RCT0T1 , // GrB_REPLACE - GrB_COMP GrB_TRAN GrB_TRAN GrB_DESC_RS , // GrB_REPLACE GrB_STRUCTURE - - - GrB_DESC_RST1 , // GrB_REPLACE GrB_STRUCTURE - - GrB_TRAN GrB_DESC_RST0 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN - GrB_DESC_RST0T1 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN 
GrB_TRAN GrB_DESC_RSC , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - - GrB_DESC_RSCT1 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - GrB_TRAN GrB_DESC_RSCT0 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN - GrB_DESC_RSCT0T1 ; // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN // GrB_NULL is the default descriptor, with all settings at their defaults: // // OUTP: do not replace the output // MASK: mask is valued and not complemented // INP0: first input not transposed // INP1: second input not transposed // Predefined descriptors may not be modified or freed. Attempting to modify // them results in an error (GrB_INVALID_VALUE). Attempts to free them are // silently ignored. //============================================================================== // GrB_Type: data types //============================================================================== typedef struct GB_Type_opaque *GrB_Type ; // GraphBLAS predefined types and their counterparts in pure C: GB_PUBLIC GrB_Type GrB_BOOL , // in C: bool GrB_INT8 , // in C: int8_t GrB_INT16 , // in C: int16_t GrB_INT32 , // in C: int32_t GrB_INT64 , // in C: int64_t GrB_UINT8 , // in C: uint8_t GrB_UINT16 , // in C: uint16_t GrB_UINT32 , // in C: uint32_t GrB_UINT64 , // in C: uint64_t GrB_FP32 , // in C: float GrB_FP64 , // in C: double GxB_FC32 , // in C: float complex GxB_FC64 ; // in C: double complex //------------------------------------------------------------------------------ // helper macros for polymorphic functions //------------------------------------------------------------------------------ #define GB_CAT(w,x,y,z) w ## x ## y ## z #define GB_CONCAT(w,x,y,z) GB_CAT (w, x, y, z) #if GxB_STDC_VERSION >= 201112L #define GB_CASES(p,prefix,func) \ const bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \ bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \ const int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \ int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \ const int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), 
\ int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \ const int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \ int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \ const int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \ int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \ const uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \ uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \ const uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \ uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \ const uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \ uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \ const uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \ uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \ const float p : GB_CONCAT ( prefix, _, func, _FP32 ), \ float p : GB_CONCAT ( prefix, _, func, _FP32 ), \ const double p : GB_CONCAT ( prefix, _, func, _FP64 ), \ double p : GB_CONCAT ( prefix, _, func, _FP64 ), \ const GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \ GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \ const GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \ GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \ const void * : GB_CONCAT ( prefix, _, func, _UDT ), \ void * : GB_CONCAT ( prefix, _, func, _UDT ) #endif //------------------------------------------------------------------------------ // GrB_Type_new: create a new type //------------------------------------------------------------------------------ // GrB_Type_new is implemented both as a macro and a function. Both are // user-callable. The default is to use the macro, since this allows the name // of the type to be saved as a string, for subsequent error reporting by // GrB_error. 
#undef GrB_Type_new #undef GrM_Type_new GB_PUBLIC GrB_Info GRB (Type_new) // create a new GraphBLAS type ( GrB_Type *type, // handle of user type to create size_t sizeof_ctype // size = sizeof (ctype) of the C type ) ; // user code should not directly use GB_STR or GB_XSTR // GB_STR: convert the content of x into a string "x" #define GB_XSTR(x) GB_STR(x) #define GB_STR(x) #x // GrB_Type_new as a user-callable macro, which allows the name of the ctype // to be added to the new type. The type_defn is unknown. #define GrB_Type_new(utype, sizeof_ctype) \ GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL) #define GrM_Type_new(utype, sizeof_ctype) \ GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL) // GxB_Type_new creates a type with a name and definition that are known to // GraphBLAS, as strings. The type_name is any valid string (max length of 128 // characters, including the required null-terminating character) that may // appear as the name of a C type created by a C "typedef" statement. It must // not contain any white-space characters. Example, creating a type of size // 16*4+4 = 68 bytes, with a 4-by-4 dense float array and a 32-bit integer: // // typedef struct { float x [4][4] ; int color ; } myquaternion ; // GrB_Type MyQtype ; // GxB_Type_new (&MyQtype, sizeof (myquaternion), "myquaternion", // "typedef struct { float x [4][4] ; int color ; } myquaternion ;") ; // // The type_name and type_defn are both null-terminated strings. Currently, // type_defn is unused, but it will be required for best performance when a JIT // is implemented in SuiteSparse:GraphBLAS (both on the CPU and GPU). User // defined types created by GrB_Type_new will not work with a JIT. // // At most GxB_MAX_NAME_LEN characters are accessed in type_name; characters // beyond that limit are silently ignored. 
#define GxB_MAX_NAME_LEN 128 GB_PUBLIC GrB_Info GxB_Type_new // create a new named GraphBLAS type ( GrB_Type *type, // handle of user type to create size_t sizeof_ctype, // size = sizeof (ctype) of the C type const char *type_name, // name of the type (max 128 characters) const char *type_defn // typedef for the type (no max length) ) ; // GB_Type_new is historical: use GxB_Type_new instead GB_PUBLIC GrB_Info GB_Type_new // not user-callable ( GrB_Type *type, // handle of user type to create size_t sizeof_ctype, // size of the user type const char *type_name // name of the type, as "sizeof (ctype)" ) ; GB_PUBLIC GrB_Info GxB_Type_name // return the name of a GraphBLAS type ( char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). const GrB_Type type ) ; GB_PUBLIC GrB_Info GxB_Type_size // determine the size of the type ( size_t *size, // the sizeof the type const GrB_Type type // type to determine the sizeof ) ; GB_PUBLIC GrB_Info GxB_Type_from_name // return the built-in GrB_Type from a name ( GrB_Type *type, // built-in type, or NULL if user-defined const char *type_name // array of size at least GxB_MAX_NAME_LEN ) ; GB_PUBLIC GrB_Info GrB_Type_free // free a user-defined type ( GrB_Type *type // handle of user-defined type to free ) ; //============================================================================== // GrB_UnaryOp: unary operators //============================================================================== // GrB_UnaryOp: a function z=f(x). The function f must have the signature: // void f (void *z, const void *x) ; // The pointers are void * but they are always of pointers to objects of type // ztype and xtype, respectively. The function must typecast its arguments as // needed from void* to ztype* and xtype*. 
typedef struct GB_UnaryOp_opaque *GrB_UnaryOp ; //------------------------------------------------------------------------------ // built-in unary operators, z = f(x) //------------------------------------------------------------------------------ GB_PUBLIC GrB_UnaryOp // For these functions z=f(x), z and x have the same type. // The suffix in the name is the type of x and z. // z = x z = -x z = 1/x z = ! (x != 0) // identity additive multiplicative logical // inverse inverse negation GrB_IDENTITY_BOOL, GrB_AINV_BOOL, GrB_MINV_BOOL, GxB_LNOT_BOOL, GrB_IDENTITY_INT8, GrB_AINV_INT8, GrB_MINV_INT8, GxB_LNOT_INT8, GrB_IDENTITY_INT16, GrB_AINV_INT16, GrB_MINV_INT16, GxB_LNOT_INT16, GrB_IDENTITY_INT32, GrB_AINV_INT32, GrB_MINV_INT32, GxB_LNOT_INT32, GrB_IDENTITY_INT64, GrB_AINV_INT64, GrB_MINV_INT64, GxB_LNOT_INT64, GrB_IDENTITY_UINT8, GrB_AINV_UINT8, GrB_MINV_UINT8, GxB_LNOT_UINT8, GrB_IDENTITY_UINT16, GrB_AINV_UINT16, GrB_MINV_UINT16, GxB_LNOT_UINT16, GrB_IDENTITY_UINT32, GrB_AINV_UINT32, GrB_MINV_UINT32, GxB_LNOT_UINT32, GrB_IDENTITY_UINT64, GrB_AINV_UINT64, GrB_MINV_UINT64, GxB_LNOT_UINT64, GrB_IDENTITY_FP32, GrB_AINV_FP32, GrB_MINV_FP32, GxB_LNOT_FP32, GrB_IDENTITY_FP64, GrB_AINV_FP64, GrB_MINV_FP64, GxB_LNOT_FP64, // complex unary operators: GxB_IDENTITY_FC32, GxB_AINV_FC32, GxB_MINV_FC32, // no LNOT GxB_IDENTITY_FC64, GxB_AINV_FC64, GxB_MINV_FC64, // for complex // z = 1 z = abs(x) z = bnot(x) z = signum // one absolute value bitwise negation GxB_ONE_BOOL, GrB_ABS_BOOL, GxB_ONE_INT8, GrB_ABS_INT8, GrB_BNOT_INT8, GxB_ONE_INT16, GrB_ABS_INT16, GrB_BNOT_INT16, GxB_ONE_INT32, GrB_ABS_INT32, GrB_BNOT_INT32, GxB_ONE_INT64, GrB_ABS_INT64, GrB_BNOT_INT64, GxB_ONE_UINT8, GrB_ABS_UINT8, GrB_BNOT_UINT8, GxB_ONE_UINT16, GrB_ABS_UINT16, GrB_BNOT_UINT16, GxB_ONE_UINT32, GrB_ABS_UINT32, GrB_BNOT_UINT32, GxB_ONE_UINT64, GrB_ABS_UINT64, GrB_BNOT_UINT64, GxB_ONE_FP32, GrB_ABS_FP32, GxB_ONE_FP64, GrB_ABS_FP64, // complex unary operators: GxB_ONE_FC32, // for complex types, z = 
abs(x) GxB_ONE_FC64, // is real; listed below. // Boolean negation, z = !x, where both z and x are boolean. There is no // suffix since z and x are only boolean. This operator is identical to // GxB_LNOT_BOOL; it just has a different name. GrB_LNOT ; // GxB_ABS is now in the v1.3 spec, the following names are historical: GB_PUBLIC GrB_UnaryOp // z = abs(x) GxB_ABS_BOOL, GxB_ABS_INT8, GxB_ABS_INT16, GxB_ABS_INT32, GxB_ABS_INT64, GxB_ABS_UINT8, GxB_ABS_UINT16, GxB_ABS_UINT32, GxB_ABS_UINT64, GxB_ABS_FP32, GxB_ABS_FP64 ; //------------------------------------------------------------------------------ // Unary operators for floating-point types only //------------------------------------------------------------------------------ // The following floating-point unary operators and their ANSI C11 equivalents, // are only defined for floating-point (real and complex) types. GB_PUBLIC GrB_UnaryOp //-------------------------------------------------------------------------- // z = f(x) where z and x have the same type (all 4 floating-point types) //-------------------------------------------------------------------------- // z = sqrt (x) z = log (x) z = exp (x) z = log2 (x) GxB_SQRT_FP32, GxB_LOG_FP32, GxB_EXP_FP32, GxB_LOG2_FP32, GxB_SQRT_FP64, GxB_LOG_FP64, GxB_EXP_FP64, GxB_LOG2_FP64, GxB_SQRT_FC32, GxB_LOG_FC32, GxB_EXP_FC32, GxB_LOG2_FC32, GxB_SQRT_FC64, GxB_LOG_FC64, GxB_EXP_FC64, GxB_LOG2_FC64, // z = sin (x) z = cos (x) z = tan (x) GxB_SIN_FP32, GxB_COS_FP32, GxB_TAN_FP32, GxB_SIN_FP64, GxB_COS_FP64, GxB_TAN_FP64, GxB_SIN_FC32, GxB_COS_FC32, GxB_TAN_FC32, GxB_SIN_FC64, GxB_COS_FC64, GxB_TAN_FC64, // z = acos (x) z = asin (x) z = atan (x) GxB_ACOS_FP32, GxB_ASIN_FP32, GxB_ATAN_FP32, GxB_ACOS_FP64, GxB_ASIN_FP64, GxB_ATAN_FP64, GxB_ACOS_FC32, GxB_ASIN_FC32, GxB_ATAN_FC32, GxB_ACOS_FC64, GxB_ASIN_FC64, GxB_ATAN_FC64, // z = sinh (x) z = cosh (x) z = tanh (x) GxB_SINH_FP32, GxB_COSH_FP32, GxB_TANH_FP32, GxB_SINH_FP64, GxB_COSH_FP64, GxB_TANH_FP64, GxB_SINH_FC32, 
GxB_COSH_FC32, GxB_TANH_FC32, GxB_SINH_FC64, GxB_COSH_FC64, GxB_TANH_FC64, // z = acosh (x) z = asinh (x) z = atanh (x) z = signum (x) GxB_ACOSH_FP32, GxB_ASINH_FP32, GxB_ATANH_FP32, GxB_SIGNUM_FP32, GxB_ACOSH_FP64, GxB_ASINH_FP64, GxB_ATANH_FP64, GxB_SIGNUM_FP64, GxB_ACOSH_FC32, GxB_ASINH_FC32, GxB_ATANH_FC32, GxB_SIGNUM_FC32, GxB_ACOSH_FC64, GxB_ASINH_FC64, GxB_ATANH_FC64, GxB_SIGNUM_FC64, // z = ceil (x) z = floor (x) z = round (x) z = trunc (x) GxB_CEIL_FP32, GxB_FLOOR_FP32, GxB_ROUND_FP32, GxB_TRUNC_FP32, GxB_CEIL_FP64, GxB_FLOOR_FP64, GxB_ROUND_FP64, GxB_TRUNC_FP64, GxB_CEIL_FC32, GxB_FLOOR_FC32, GxB_ROUND_FC32, GxB_TRUNC_FC32, GxB_CEIL_FC64, GxB_FLOOR_FC64, GxB_ROUND_FC64, GxB_TRUNC_FC64, // z = exp2 (x) z = expm1 (x) z = log10 (x) z = log1p (x) GxB_EXP2_FP32, GxB_EXPM1_FP32, GxB_LOG10_FP32, GxB_LOG1P_FP32, GxB_EXP2_FP64, GxB_EXPM1_FP64, GxB_LOG10_FP64, GxB_LOG1P_FP64, GxB_EXP2_FC32, GxB_EXPM1_FC32, GxB_LOG10_FC32, GxB_LOG1P_FC32, GxB_EXP2_FC64, GxB_EXPM1_FC64, GxB_LOG10_FC64, GxB_LOG1P_FC64, //-------------------------------------------------------------------------- // z = f(x) where z and x are the same type (floating-point real only) //-------------------------------------------------------------------------- // z = lgamma (x) z = tgamma (x) z = erf (x) z = erfc (x) GxB_LGAMMA_FP32, GxB_TGAMMA_FP32, GxB_ERF_FP32, GxB_ERFC_FP32, GxB_LGAMMA_FP64, GxB_TGAMMA_FP64, GxB_ERF_FP64, GxB_ERFC_FP64, // frexpx and frexpe return the mantissa and exponent, respectively, // from the ANSI C11 frexp function. The exponent is returned as a // floating-point value, not an integer. 
// z = frexpx (x) z = frexpe (x) GxB_FREXPX_FP32, GxB_FREXPE_FP32, GxB_FREXPX_FP64, GxB_FREXPE_FP64, //-------------------------------------------------------------------------- // z = f(x) where z and x are the same type (complex only) //-------------------------------------------------------------------------- // z = conj (x) GxB_CONJ_FC32, GxB_CONJ_FC64, //-------------------------------------------------------------------------- // z = f(x) where z is real and x is complex: //-------------------------------------------------------------------------- // z = creal (x) z = cimag (x) z = carg (x) z = abs (x) GxB_CREAL_FC32, GxB_CIMAG_FC32, GxB_CARG_FC32, GxB_ABS_FC32, GxB_CREAL_FC64, GxB_CIMAG_FC64, GxB_CARG_FC64, GxB_ABS_FC64, //-------------------------------------------------------------------------- // z = f(x) where z is bool and x is any floating-point type //-------------------------------------------------------------------------- // z = isinf (x) GxB_ISINF_FP32, GxB_ISINF_FP64, GxB_ISINF_FC32, // isinf (creal (x)) || isinf (cimag (x)) GxB_ISINF_FC64, // isinf (creal (x)) || isinf (cimag (x)) // z = isnan (x) GxB_ISNAN_FP32, GxB_ISNAN_FP64, GxB_ISNAN_FC32, // isnan (creal (x)) || isnan (cimag (x)) GxB_ISNAN_FC64, // isnan (creal (x)) || isnan (cimag (x)) // z = isfinite (x) GxB_ISFINITE_FP32, GxB_ISFINITE_FP64, GxB_ISFINITE_FC32, // isfinite (real (x)) && isfinite (cimag (x)) GxB_ISFINITE_FC64 ; // isfinite (real (x)) && isfinite (cimag (x)) //------------------------------------------------------------------------------ // methods for unary operators //------------------------------------------------------------------------------ typedef void (*GxB_unary_function) (void *, const void *) ; // GrB_UnaryOp_new creates a user-defined unary op, with an automatic // detection of the operator name. 
#undef GrB_UnaryOp_new #undef GrM_UnaryOp_new GB_PUBLIC GrB_Info GRB (UnaryOp_new) // create a new user-defined unary operator ( GrB_UnaryOp *unaryop, // handle for the new unary operator GxB_unary_function function, // pointer to the unary function GrB_Type ztype, // type of output z GrB_Type xtype // type of input x ) ; #define GrB_UnaryOp_new(op,f,z,x) \ GxB_UnaryOp_new(op,f,z,x, GB_STR(f), NULL) #define GrM_UnaryOp_new(op,f,z,x) \ GxM_UnaryOp_new(op,f,z,x, GB_STR(f), NULL) // GxB_UnaryOp_new creates a named user-defined unary op. GB_PUBLIC GrB_Info GxB_UnaryOp_new // create a new user-defined unary operator ( GrB_UnaryOp *unaryop, // handle for the new unary operator GxB_unary_function function, // pointer to the unary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x const char *unop_name, // name of the user function const char *unop_defn // definition of the user function ) ; // GB_UnaryOp_new is historical: use GxB_UnaryOp_new instead GB_PUBLIC GrB_Info GB_UnaryOp_new // not user-callable ( GrB_UnaryOp *unaryop, // handle for the new unary operator GxB_unary_function function, // pointer to the unary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x const char *unop_name // name of the user function ) ; // GxB_UnaryOp_ztype is historical. Use GxB_UnaryOp_ztype_name instead. GB_PUBLIC GrB_Info GxB_UnaryOp_ztype // return the type of z ( GrB_Type *ztype, // return type of output z GrB_UnaryOp unaryop // unary operator ) ; GB_PUBLIC GrB_Info GxB_UnaryOp_ztype_name // return the type_name of z ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_UnaryOp unaryop // unary operator ) ; // GxB_UnaryOp_xtype is historical. Use GxB_UnaryOp_xtype_name instead. 
GB_PUBLIC GrB_Info GxB_UnaryOp_xtype // return the type of x ( GrB_Type *xtype, // return type of input x GrB_UnaryOp unaryop // unary operator ) ; GB_PUBLIC GrB_Info GxB_UnaryOp_xtype_name // return the type_name of x ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_UnaryOp unaryop // unary operator ) ; GB_PUBLIC GrB_Info GrB_UnaryOp_free // free a user-created unary operator ( GrB_UnaryOp *unaryop // handle of unary operator to free ) ; //============================================================================== // GrB_BinaryOp: binary operators //============================================================================== // GrB_BinaryOp: a function z=f(x,y). The function f must have the signature: // void f (void *z, const void *x, const void *y) ; // The pointers are void * but they are always of pointers to objects of type // ztype, xtype, and ytype, respectively. See Demo/usercomplex.c for examples. typedef struct GB_BinaryOp_opaque *GrB_BinaryOp ; //------------------------------------------------------------------------------ // built-in binary operators, z = f(x,y), where x,y,z all have the same type //------------------------------------------------------------------------------ GB_PUBLIC GrB_BinaryOp // operators for all 13 types (including complex): // GxB_PAIR_T and GrB_ONEB_T are identical; the latter was added to the // v2.0 C API Specification. 
// z = x z = y z = 1 z = pow (x,y) GrB_FIRST_BOOL, GrB_SECOND_BOOL, GrB_ONEB_BOOL, GxB_POW_BOOL, GrB_FIRST_INT8, GrB_SECOND_INT8, GrB_ONEB_INT8, GxB_POW_INT8, GrB_FIRST_INT16, GrB_SECOND_INT16, GrB_ONEB_INT16, GxB_POW_INT16, GrB_FIRST_INT32, GrB_SECOND_INT32, GrB_ONEB_INT32, GxB_POW_INT32, GrB_FIRST_INT64, GrB_SECOND_INT64, GrB_ONEB_INT64, GxB_POW_INT64, GrB_FIRST_UINT8, GrB_SECOND_UINT8, GrB_ONEB_UINT8, GxB_POW_UINT8, GrB_FIRST_UINT16, GrB_SECOND_UINT16, GrB_ONEB_UINT16, GxB_POW_UINT16, GrB_FIRST_UINT32, GrB_SECOND_UINT32, GrB_ONEB_UINT32, GxB_POW_UINT32, GrB_FIRST_UINT64, GrB_SECOND_UINT64, GrB_ONEB_UINT64, GxB_POW_UINT64, GrB_FIRST_FP32, GrB_SECOND_FP32, GrB_ONEB_FP32, GxB_POW_FP32, GrB_FIRST_FP64, GrB_SECOND_FP64, GrB_ONEB_FP64, GxB_POW_FP64, // complex: GxB_FIRST_FC32, GxB_SECOND_FC32, GxB_ONEB_FC32, GxB_POW_FC32, GxB_FIRST_FC64, GxB_SECOND_FC64, GxB_ONEB_FC64, GxB_POW_FC64, // z = x+y z = x-y z = x*y z = x/y GrB_PLUS_BOOL, GrB_MINUS_BOOL, GrB_TIMES_BOOL, GrB_DIV_BOOL, GrB_PLUS_INT8, GrB_MINUS_INT8, GrB_TIMES_INT8, GrB_DIV_INT8, GrB_PLUS_INT16, GrB_MINUS_INT16, GrB_TIMES_INT16, GrB_DIV_INT16, GrB_PLUS_INT32, GrB_MINUS_INT32, GrB_TIMES_INT32, GrB_DIV_INT32, GrB_PLUS_INT64, GrB_MINUS_INT64, GrB_TIMES_INT64, GrB_DIV_INT64, GrB_PLUS_UINT8, GrB_MINUS_UINT8, GrB_TIMES_UINT8, GrB_DIV_UINT8, GrB_PLUS_UINT16, GrB_MINUS_UINT16, GrB_TIMES_UINT16, GrB_DIV_UINT16, GrB_PLUS_UINT32, GrB_MINUS_UINT32, GrB_TIMES_UINT32, GrB_DIV_UINT32, GrB_PLUS_UINT64, GrB_MINUS_UINT64, GrB_TIMES_UINT64, GrB_DIV_UINT64, GrB_PLUS_FP32, GrB_MINUS_FP32, GrB_TIMES_FP32, GrB_DIV_FP32, GrB_PLUS_FP64, GrB_MINUS_FP64, GrB_TIMES_FP64, GrB_DIV_FP64, // complex: GxB_PLUS_FC32, GxB_MINUS_FC32, GxB_TIMES_FC32, GxB_DIV_FC32, GxB_PLUS_FC64, GxB_MINUS_FC64, GxB_TIMES_FC64, GxB_DIV_FC64, // z = y-x z = y/x z = 1 z = any(x,y) GxB_RMINUS_BOOL, GxB_RDIV_BOOL, GxB_PAIR_BOOL, GxB_ANY_BOOL, GxB_RMINUS_INT8, GxB_RDIV_INT8, GxB_PAIR_INT8, GxB_ANY_INT8, GxB_RMINUS_INT16, GxB_RDIV_INT16, GxB_PAIR_INT16, GxB_ANY_INT16, 
GxB_RMINUS_INT32, GxB_RDIV_INT32, GxB_PAIR_INT32, GxB_ANY_INT32, GxB_RMINUS_INT64, GxB_RDIV_INT64, GxB_PAIR_INT64, GxB_ANY_INT64, GxB_RMINUS_UINT8, GxB_RDIV_UINT8, GxB_PAIR_UINT8, GxB_ANY_UINT8, GxB_RMINUS_UINT16, GxB_RDIV_UINT16, GxB_PAIR_UINT16, GxB_ANY_UINT16, GxB_RMINUS_UINT32, GxB_RDIV_UINT32, GxB_PAIR_UINT32, GxB_ANY_UINT32, GxB_RMINUS_UINT64, GxB_RDIV_UINT64, GxB_PAIR_UINT64, GxB_ANY_UINT64, GxB_RMINUS_FP32, GxB_RDIV_FP32, GxB_PAIR_FP32, GxB_ANY_FP32, GxB_RMINUS_FP64, GxB_RDIV_FP64, GxB_PAIR_FP64, GxB_ANY_FP64, // complex: GxB_RMINUS_FC32, GxB_RDIV_FC32, GxB_PAIR_FC32, GxB_ANY_FC32, GxB_RMINUS_FC64, GxB_RDIV_FC64, GxB_PAIR_FC64, GxB_ANY_FC64, // The GxB_IS* comparators z=f(x,y) return the same type as their // inputs. Each of them compute z = (x OP y), where x, y, and z all have // the same type. The value z is either 1 for true or 0 for false, but it // is a value with the same type as x and y. // z = (x == y) z = (x != y) GxB_ISEQ_BOOL, GxB_ISNE_BOOL, GxB_ISEQ_INT8, GxB_ISNE_INT8, GxB_ISEQ_INT16, GxB_ISNE_INT16, GxB_ISEQ_INT32, GxB_ISNE_INT32, GxB_ISEQ_INT64, GxB_ISNE_INT64, GxB_ISEQ_UINT8, GxB_ISNE_UINT8, GxB_ISEQ_UINT16, GxB_ISNE_UINT16, GxB_ISEQ_UINT32, GxB_ISNE_UINT32, GxB_ISEQ_UINT64, GxB_ISNE_UINT64, GxB_ISEQ_FP32, GxB_ISNE_FP32, GxB_ISEQ_FP64, GxB_ISNE_FP64, // complex: GxB_ISEQ_FC32, GxB_ISNE_FC32, GxB_ISEQ_FC64, GxB_ISNE_FC64, // z = (x > y) z = (x < y) z = (x >= y) z = (x <= y) GxB_ISGT_BOOL, GxB_ISLT_BOOL, GxB_ISGE_BOOL, GxB_ISLE_BOOL, GxB_ISGT_INT8, GxB_ISLT_INT8, GxB_ISGE_INT8, GxB_ISLE_INT8, GxB_ISGT_INT16, GxB_ISLT_INT16, GxB_ISGE_INT16, GxB_ISLE_INT16, GxB_ISGT_INT32, GxB_ISLT_INT32, GxB_ISGE_INT32, GxB_ISLE_INT32, GxB_ISGT_INT64, GxB_ISLT_INT64, GxB_ISGE_INT64, GxB_ISLE_INT64, GxB_ISGT_UINT8, GxB_ISLT_UINT8, GxB_ISGE_UINT8, GxB_ISLE_UINT8, GxB_ISGT_UINT16, GxB_ISLT_UINT16, GxB_ISGE_UINT16, GxB_ISLE_UINT16, GxB_ISGT_UINT32, GxB_ISLT_UINT32, GxB_ISGE_UINT32, GxB_ISLE_UINT32, GxB_ISGT_UINT64, GxB_ISLT_UINT64, GxB_ISGE_UINT64, GxB_ISLE_UINT64, 
    GxB_ISGT_FP32,     GxB_ISLT_FP32,     GxB_ISGE_FP32,     GxB_ISLE_FP32,
    GxB_ISGT_FP64,     GxB_ISLT_FP64,     GxB_ISGE_FP64,     GxB_ISLE_FP64,

    // z = min(x,y)    z = max (x,y)
    GrB_MIN_BOOL,      GrB_MAX_BOOL,
    GrB_MIN_INT8,      GrB_MAX_INT8,
    GrB_MIN_INT16,     GrB_MAX_INT16,
    GrB_MIN_INT32,     GrB_MAX_INT32,
    GrB_MIN_INT64,     GrB_MAX_INT64,
    GrB_MIN_UINT8,     GrB_MAX_UINT8,
    GrB_MIN_UINT16,    GrB_MAX_UINT16,
    GrB_MIN_UINT32,    GrB_MAX_UINT32,
    GrB_MIN_UINT64,    GrB_MAX_UINT64,
    GrB_MIN_FP32,      GrB_MAX_FP32,
    GrB_MIN_FP64,      GrB_MAX_FP64,

    // Binary operators for each of the 11 real types:

    // The operators convert non-boolean types internally to boolean and return
    // a value 1 or 0 in the same type, for true or false.  Each computes z =
    // ((x != 0) OP (y != 0)), where x, y, and z all have the same type.  These
    // operators are useful as multiplicative operators when combined with
    // non-boolean monoids of the same type.

    // z = (x || y)    z = (x && y)    z = (x != y)
    GxB_LOR_BOOL,      GxB_LAND_BOOL,     GxB_LXOR_BOOL,
    GxB_LOR_INT8,      GxB_LAND_INT8,     GxB_LXOR_INT8,
    GxB_LOR_INT16,     GxB_LAND_INT16,    GxB_LXOR_INT16,
    GxB_LOR_INT32,     GxB_LAND_INT32,    GxB_LXOR_INT32,
    GxB_LOR_INT64,     GxB_LAND_INT64,    GxB_LXOR_INT64,
    GxB_LOR_UINT8,     GxB_LAND_UINT8,    GxB_LXOR_UINT8,
    GxB_LOR_UINT16,    GxB_LAND_UINT16,   GxB_LXOR_UINT16,
    GxB_LOR_UINT32,    GxB_LAND_UINT32,   GxB_LXOR_UINT32,
    GxB_LOR_UINT64,    GxB_LAND_UINT64,   GxB_LXOR_UINT64,
    GxB_LOR_FP32,      GxB_LAND_FP32,     GxB_LXOR_FP32,
    GxB_LOR_FP64,      GxB_LAND_FP64,     GxB_LXOR_FP64,

    // Binary operators that operate only on boolean types: LOR, LAND, LXOR,
    // and LXNOR.  The naming convention differs (_BOOL is not appended to the
    // name).  They are the same as GxB_LOR_BOOL, GxB_LAND_BOOL, and
    // GxB_LXOR_BOOL, and GrB_EQ_BOOL, respectively.
// z = (x || y) z = (x && y) z = (x != y) z = (x == y) GrB_LOR, GrB_LAND, GrB_LXOR, GrB_LXNOR, // Operators for floating-point reals: // z = atan2(x,y) z = hypot(x,y) z = fmod(x,y) z = remainder(x,y) GxB_ATAN2_FP32, GxB_HYPOT_FP32, GxB_FMOD_FP32, GxB_REMAINDER_FP32, GxB_ATAN2_FP64, GxB_HYPOT_FP64, GxB_FMOD_FP64, GxB_REMAINDER_FP64, // z = ldexp(x,y) z = copysign (x,y) GxB_LDEXP_FP32, GxB_COPYSIGN_FP32, GxB_LDEXP_FP64, GxB_COPYSIGN_FP64, // Bitwise operations on signed and unsigned integers: note that // bitwise operations on signed integers can lead to different results, // depending on your compiler; results are implementation-defined. // z = (x | y) z = (x & y) z = (x ^ y) z = ~(x ^ y) GrB_BOR_INT8, GrB_BAND_INT8, GrB_BXOR_INT8, GrB_BXNOR_INT8, GrB_BOR_INT16, GrB_BAND_INT16, GrB_BXOR_INT16, GrB_BXNOR_INT16, GrB_BOR_INT32, GrB_BAND_INT32, GrB_BXOR_INT32, GrB_BXNOR_INT32, GrB_BOR_INT64, GrB_BAND_INT64, GrB_BXOR_INT64, GrB_BXNOR_INT64, GrB_BOR_UINT8, GrB_BAND_UINT8, GrB_BXOR_UINT8, GrB_BXNOR_UINT8, GrB_BOR_UINT16, GrB_BAND_UINT16, GrB_BXOR_UINT16, GrB_BXNOR_UINT16, GrB_BOR_UINT32, GrB_BAND_UINT32, GrB_BXOR_UINT32, GrB_BXNOR_UINT32, GrB_BOR_UINT64, GrB_BAND_UINT64, GrB_BXOR_UINT64, GrB_BXNOR_UINT64, // z = bitget(x,y) z = bitset(x,y) z = bitclr(x,y) GxB_BGET_INT8, GxB_BSET_INT8, GxB_BCLR_INT8, GxB_BGET_INT16, GxB_BSET_INT16, GxB_BCLR_INT16, GxB_BGET_INT32, GxB_BSET_INT32, GxB_BCLR_INT32, GxB_BGET_INT64, GxB_BSET_INT64, GxB_BCLR_INT64, GxB_BGET_UINT8, GxB_BSET_UINT8, GxB_BCLR_UINT8, GxB_BGET_UINT16, GxB_BSET_UINT16, GxB_BCLR_UINT16, GxB_BGET_UINT32, GxB_BSET_UINT32, GxB_BCLR_UINT32, GxB_BGET_UINT64, GxB_BSET_UINT64, GxB_BCLR_UINT64 ; //------------------------------------------------------------------------------ // z=f(x,y) where z and x have the same type, but y is GrB_INT8 //------------------------------------------------------------------------------ // z = bitshift (x,y) computes z = x left-shifted by y bits if y >= 0, or z // = x right-shifted by (-y) bits if y 
< 0. z is equal to x if y is zero. // z and x have the same type, as given by the suffix on the operator name. // Since y must be signed, it cannot have the same type as x when x is // unsigned; it is always GrB_INT8 for all 8 versions of this operator. // The GxB_BSHIFT_* operators compute the arithmetic shift, and produce the // same results as the bitshift.m function, for all possible inputs. GB_PUBLIC GrB_BinaryOp // z = bitshift(x,y) GxB_BSHIFT_INT8, GxB_BSHIFT_INT16, GxB_BSHIFT_INT32, GxB_BSHIFT_INT64, GxB_BSHIFT_UINT8, GxB_BSHIFT_UINT16, GxB_BSHIFT_UINT32, GxB_BSHIFT_UINT64 ; //------------------------------------------------------------------------------ // z=f(x,y) where z is BOOL and the type of x,y is given by the suffix //------------------------------------------------------------------------------ GB_PUBLIC GrB_BinaryOp // Six comparators z=f(x,y) return their result as boolean, but // where x and y have the same type. The suffix in their names refers to // the type of x and y since z is always boolean. If used as multiply // operators in a semiring, they can only be combined with boolean monoids. // The _BOOL versions of these operators give the same results as their // IS*_BOOL counterparts. GrB_EQ_BOOL and GrB_LXNOR are identical. 
// z = (x == y) z = (x != y) z = (x > y) z = (x < y) GrB_EQ_BOOL, GrB_NE_BOOL, GrB_GT_BOOL, GrB_LT_BOOL, GrB_EQ_INT8, GrB_NE_INT8, GrB_GT_INT8, GrB_LT_INT8, GrB_EQ_INT16, GrB_NE_INT16, GrB_GT_INT16, GrB_LT_INT16, GrB_EQ_INT32, GrB_NE_INT32, GrB_GT_INT32, GrB_LT_INT32, GrB_EQ_INT64, GrB_NE_INT64, GrB_GT_INT64, GrB_LT_INT64, GrB_EQ_UINT8, GrB_NE_UINT8, GrB_GT_UINT8, GrB_LT_UINT8, GrB_EQ_UINT16, GrB_NE_UINT16, GrB_GT_UINT16, GrB_LT_UINT16, GrB_EQ_UINT32, GrB_NE_UINT32, GrB_GT_UINT32, GrB_LT_UINT32, GrB_EQ_UINT64, GrB_NE_UINT64, GrB_GT_UINT64, GrB_LT_UINT64, GrB_EQ_FP32, GrB_NE_FP32, GrB_GT_FP32, GrB_LT_FP32, GrB_EQ_FP64, GrB_NE_FP64, GrB_GT_FP64, GrB_LT_FP64, // complex: GxB_EQ_FC32, GxB_NE_FC32, GxB_EQ_FC64, GxB_NE_FC64, // z = (x >= y) z = (x <= y) GrB_GE_BOOL, GrB_LE_BOOL, GrB_GE_INT8, GrB_LE_INT8, GrB_GE_INT16, GrB_LE_INT16, GrB_GE_INT32, GrB_LE_INT32, GrB_GE_INT64, GrB_LE_INT64, GrB_GE_UINT8, GrB_LE_UINT8, GrB_GE_UINT16, GrB_LE_UINT16, GrB_GE_UINT32, GrB_LE_UINT32, GrB_GE_UINT64, GrB_LE_UINT64, GrB_GE_FP32, GrB_LE_FP32, GrB_GE_FP64, GrB_LE_FP64 ; //------------------------------------------------------------------------------ // z=f(x,y) where z is complex and the type of x,y is given by the suffix //------------------------------------------------------------------------------ GB_PUBLIC GrB_BinaryOp // z = cmplx (x,y) GxB_CMPLX_FP32, GxB_CMPLX_FP64 ; //============================================================================== // positional GrB_UnaryOp and GrB_BinaryOp operators //============================================================================== // Positional operators do not depend on the value of an entry, but its row or // column index in the matrix instead. For example, for an entry A(i,j), // first_i(A(i,j),y) is equal to i. These operators are useful for returning // node id's as the result of a semiring operation. If used as a mask, zero // has a special value, and thus z=first_i1(A(i,j),j) returns i+1 instead of i. 
// This can be useful when using a positional operator to construct a mask // matrix or vector for another GraphBLAS operation. It is also essential for // the @GrB interface, since the user view of matrix indices in @GrB is // 1-based, not 0-based. // When applied to a vector, j is always equal to 0. For a GxB_SCALAR, // both i and j are always zero. // GraphBLAS defines a GrB_Index as uint64_t, but these operators return a // GrB_INT32 or GrB_INT64 type, which is more flexible to use because the // result of this operator can be negated, to flag an entry for example. The // value -1 can be used to denote "no node" or "no position". GrB_INT32 is // useful for graphs smaller than 2^31 nodes. If the row or column index // exceeds INT32_MAX, the result is determined by the typecast from the // 64-bit index to the smaller 32-bit index. // Positional operators cannot be used to construct monoids. They can be used // as multiplicative operators in semirings, and as operators for GrB_eWise*, // and GrB_apply (bind first or second). For the latter, the operator cannot // depend on the bound scalar. // When used as multiplicative operators in a semiring, FIRSTJ and SECONDI // are identical. If C(i,j) += t is computed where t = A(i,k)*B(k,j), then // t = k in both cases. Likewise, FIRSTJ1 and SECONDI1 are identical. 
GB_PUBLIC GrB_BinaryOp GxB_FIRSTI_INT32, GxB_FIRSTI_INT64, // z = first_i(A(i,j),y) == i GxB_FIRSTI1_INT32, GxB_FIRSTI1_INT64, // z = first_i1(A(i,j),y) == i+1 GxB_FIRSTJ_INT32, GxB_FIRSTJ_INT64, // z = first_j(A(i,j),y) == j GxB_FIRSTJ1_INT32, GxB_FIRSTJ1_INT64, // z = first_j1(A(i,j),y) == j+1 GxB_SECONDI_INT32, GxB_SECONDI_INT64, // z = second_i(x,B(i,j)) == i GxB_SECONDI1_INT32, GxB_SECONDI1_INT64, // z = second_i1(x,B(i,j)) == i+1 GxB_SECONDJ_INT32, GxB_SECONDJ_INT64, // z = second_j(x,B(i,j)) == j GxB_SECONDJ1_INT32, GxB_SECONDJ1_INT64 ; // z = second_j1(x,B(i,j)) == j+1 GB_PUBLIC GrB_UnaryOp GxB_POSITIONI_INT32, GxB_POSITIONI_INT64, // z=position_i(A(i,j)) == i GxB_POSITIONI1_INT32, GxB_POSITIONI1_INT64, // z=position_i1(A(i,j)) == i+1 GxB_POSITIONJ_INT32, GxB_POSITIONJ_INT64, // z=position_j(A(i,j)) == j GxB_POSITIONJ1_INT32, GxB_POSITIONJ1_INT64 ;// z=position_j1(A(i,j)) == j+1 //============================================================================== // special GrB_BinaryOp for build methods only //============================================================================== // In GrB*build* methods, passing dup as NULL means that no duplicates are // tolerated. If duplicates appear, an error is returned. If dup is a binary // operator, it is applied to reduce duplicates to a single value. The // GxB_IGNORE_DUP is a special case. It is not an operator, but an indication // that any duplicates are to be ignored. GB_PUBLIC GrB_BinaryOp GxB_IGNORE_DUP ; //============================================================================== // About boolean and bitwise binary operators //============================================================================== // Some of the boolean operators compute the same thing with different names. // For example, x*y and x&&y give the same results for boolean x and y. // Operations such as x < y when x and y are boolean are treated as if true=1 // and false=0. 
Below is the truth table for all binary operators with boolean // inputs. This table is defined by how C typecasts boolean values for // non-boolean operations. For example, if x, y, and z are boolean, x = true, // and y = true, then z = x + y = true + true = true. DIV (x/y) is defined // below. RDIV (y/x) is shown as \ in the table; it is the same as 2nd. // x y 1st 2nd min max + - * / or and xor eq ne > < ge le \ pow pair // 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 1 1 // 0 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 1 1 0 1 // 1 0 1 0 0 1 1 1 0 1 1 0 1 0 1 1 0 1 0 0 1 1 // 1 1 1 1 1 1 1 0 1 1 1 1 0 1 0 0 0 1 1 1 1 1 // GraphBLAS includes a GrB_DIV_BOOL operator in its specification, but does // not define what boolean "division" means. SuiteSparse:GraphBLAS makes the // following interpretation. // GraphBLAS does not generate exceptions for divide-by-zero. Floating-point // divide-by-zero follows the IEEE 754 standard: 1/0 is +Inf, -1/0 is -Inf, and // 0/0 is NaN. For integer division by zero, if x is positive, x/0 is the // largest integer, -x/0 is the integer minimum (zero for unsigned integers), // and 0/0 is zero. For example, for int8, 1/0 is 127, and -1/0 is -128. For // uint8, 1/0 is 255 and 0/0 is zero. // Boolean division is treated as if it were an unsigned integer type with // true=1 and false=0, and with the max and min value being 1 and 0. As a // result, GrB_IDENTITY_BOOL, GrB_AINV_BOOL, and GrB_MINV_BOOL all give the // same result (z = x). // With this convention for boolean "division", there are 11 unique binary // operators that are purely boolean. Other named *_BOOL operators are // redundant but are included in GraphBLAS so that the name space of operators // is complete. Below is a list of all operators and their equivalents. 
// x: 0 0 1 1 // y: 0 1 0 1 // z: see below // // z = 0 0 0 0 0 (zero function, not predefined) // z = (x && y) 0 0 0 1 AND, MIN, TIMES // z = (x > y) 0 0 1 0 GT, ISGT, and set diff (x\y) // z = x 0 0 1 1 FIRST, DIV // // z = (x < y) 0 1 0 0 LT, ISLT, and set diff (y\x) // z = y 0 1 0 1 SECOND, RDIV // z = (x != y) 0 1 1 0 XOR, MINUS, RMINUS, NE, ISNE // z = (x || y) 0 1 1 1 OR, MAX, PLUS // // z = ~(x || y) 1 0 0 0 (nor(x,y) function, not predefined) // z = (x == y) 1 0 0 1 LXNOR, EQ, ISEQ // z = ~y 1 0 1 0 (not(y), not predefined) // z = (x >= y) 1 0 1 1 GE, ISGE, POW, and "x implies y" // // z = ~x 1 1 0 0 (not(x), not predefined) // z = (x <= y) 1 1 0 1 LE, ISLE, and "y implies x" // z = ~(x && y) 1 1 1 0 (nand(x,y) function, not predefined) // z = 1 1 1 1 1 PAIR, ONEB // // z = any(x,y) 0 . . 1 ANY (pick x or y arbitrarily) // Four more that have no _BOOL suffix are also redundant with the operators // of the form GxB_*_BOOL (GrB_LOR, GrB_LAND, GrB_LXOR, and GrB_LXNOR). // Note that the boolean binary operator space is not complete. Five other // boolean functions could be pre-defined as well: z = 0, nor(x,y), // nand(x,y), not(x), and not(y). // Four of the possible 16 bitwise operators are pre-defined: BOR, BAND, // BXOR, and BXNOR. This assumes that the computations for each bit are // entirely independent (so BSHIFT would not fit in the table above). //------------------------------------------------------------------------------ // methods for binary operators //------------------------------------------------------------------------------ typedef void (*GxB_binary_function) (void *, const void *, const void *) ; // GrB_BinaryOp_new creates a user-defined binary op, with an automatic // detection of the operator name. 
#undef GrB_BinaryOp_new #undef GrM_BinaryOp_new GB_PUBLIC GrB_Info GRB (BinaryOp_new) ( GrB_BinaryOp *binaryop, // handle for the new binary operator GxB_binary_function function, // pointer to the binary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype // type of input y ) ; #define GrB_BinaryOp_new(op,f,z,x,y) \ GxB_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL) #define GrM_BinaryOp_new(op,f,z,x,y) \ GxM_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL) // GxB_BinaryOp_new creates a named user-defined binary op. GB_PUBLIC GrB_Info GxB_BinaryOp_new ( GrB_BinaryOp *op, // handle for the new binary operator GxB_binary_function function, // pointer to the binary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype, // type of input y const char *binop_name, // name of the user function const char *binop_defn // definition of the user function ) ; // GB_BinaryOp_new is historical: use GxB_BinaryOp_new instead GB_PUBLIC GrB_Info GB_BinaryOp_new // not user-callable ( GrB_BinaryOp *binaryop, // handle for the new binary operator GxB_binary_function function, // pointer to the binary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype, // type of input y const char *binop_name // name of the user function ) ; // NOTE: GxB_BinaryOp_ztype is historical. Use GxB_BinaryOp_ztype_name instead. GB_PUBLIC GrB_Info GxB_BinaryOp_ztype // return the type of z ( GrB_Type *ztype, // return type of output z GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GxB_BinaryOp_ztype_name // return the type_name of z ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_BinaryOp binaryop // binary operator to query ) ; // NOTE: GxB_BinaryOp_xtype is historical. Use GxB_BinaryOp_xtype_name instead. 
GB_PUBLIC GrB_Info GxB_BinaryOp_xtype // return the type of x ( GrB_Type *xtype, // return type of input x GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GxB_BinaryOp_xtype_name // return the type_name of x ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_BinaryOp binaryop // binary operator to query ) ; // NOTE: GxB_BinaryOp_ytype is historical. Use GxB_BinaryOp_ytype_name instead. GB_PUBLIC GrB_Info GxB_BinaryOp_ytype // return the type of y ( GrB_Type *ytype, // return type of input y GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GxB_BinaryOp_ytype_name // return the type_name of y ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GrB_BinaryOp_free // free a user-created binary operator ( GrB_BinaryOp *binaryop // handle of binary operator to free ) ; //============================================================================== // GxB_SelectOp: select operators (historical) //============================================================================== // GrB_IndexUnaryOp should be used instead of GxB_SelectOp. // GxB_SelectOp is an operator used by GxB_select to select entries from an // input matrix A that are kept in the output C. If an entry A(i,j) in the // matrix A, of size nrows-by-ncols, has the value aij, then it calls the // select function as result = f (i, j, aij, thunk). If the function returns // true, the entry is kept in the output C. If f returns false, the entry is // not kept in C. The type of x for the GxB_SelectOp operator may be any of // the 11 built-in types, or any user-defined type. It may also be GrB_NULL, // to indicate that the function is type-generic and does not depend at all on // the value aij. In this case, x is passed to f as a NULL pointer. // The optional Thunk parameter to GxB_select is a GrB_Scalar. 
For built-in // select operators (TRIL, TRIU, DIAG, and OFFDIAG), Thunk must have any // built-in type, and thunk = (int64_t) Thunk is used to specify the diagonal // for these operators. Thunk may be NULL, in which case its value is treated // as zero, if it has a built-in type. The value of Thunk (if present) is not // modified by any built-in select operator. // For user-defined select operators, Thunk is not typecasted at all. If // the user operator is defined with a non-NULL Thunk input, then it must // be non-NULL and of the same type, when calling GxB_select. // GxB_SelectOp: a function z=f(i,j,x,thunk) for the GxB_Select operation. // The function f must have the signature: // bool f (GrB_Index i, GrB_Index j, const void *x, const void *thunk) ; // The values of i and j are guaranteed to be in the range 0 to // GrB_INDEX_MAX, and they can be safely typecasted to int64_t then negated, // if desired, without any risk of integer overflow. typedef struct GB_SelectOp_opaque *GxB_SelectOp ; //------------------------------------------------------------------------------ // built-in select operators (historical) //------------------------------------------------------------------------------ // GxB_select (C, Mask, accum, op, A, Thunk, desc) always returns a matrix C of // the same size as A (or A' if GrB_TRAN is in the descriptor). 
GB_PUBLIC GxB_SelectOp GxB_TRIL, // C=tril(A,thunk): returns true if ((j-i) <= thunk) GxB_TRIU, // C=triu(A,thunk): returns true if ((j-i) >= thunk) GxB_DIAG, // C=diag(A,thunk): returns true if ((j-i) == thunk) GxB_OFFDIAG, // C=A-diag(A,thunk): returns true if ((j-i) != thunk) GxB_NONZERO, // C=A(A ~= 0) GxB_EQ_ZERO, // C=A(A == 0) GxB_GT_ZERO, // C=A(A > 0) GxB_GE_ZERO, // C=A(A >= 0) GxB_LT_ZERO, // C=A(A < 0) GxB_LE_ZERO, // C=A(A <= 0) GxB_NE_THUNK, // C=A(A ~= thunk) GxB_EQ_THUNK, // C=A(A == thunk) GxB_GT_THUNK, // C=A(A > thunk) GxB_GE_THUNK, // C=A(A >= thunk) GxB_LT_THUNK, // C=A(A < thunk) GxB_LE_THUNK ; // C=A(A <= thunk) // For GxB_TRIL, GxB_TRIU, GxB_DIAG, and GxB_OFFDIAG, the parameter Thunk is a // GrB_Scalar of any built-in type. If GrB_NULL, or empty, Thunk is treated as // zero. Otherwise, the single entry is typecasted as (int64_t) Thunk. // These select operators do not depend on the values of A, but just their // position, and they work on matrices of any type. // For GxB_*ZERO, the result depends only on the value of A(i,j). The Thunk // parameter to GxB_select is ignored and may be GrB_NULL. // The operators GxB_TRIL, GxB_TRIU, GxB_DIAG, GxB_OFFDIAG, GxB_NONZERO, // GxB_EQ_ZERO, GxB_NE_THUNK, and GxB_EQ_THUNK work on all built-in types and // all user-defined types. // GxB_GT_*, GxB_GE_*, GxB_LT_*, and GxB_LE_* only work on the 11 built-in // types (not complex). They cannot be used for user-defined types. //------------------------------------------------------------------------------ // select operators: (historical) //------------------------------------------------------------------------------ // User-defined GxB_SelectOps are historical. New code should use // GrB_IndexUnaryOp_new instead. 
typedef bool (*GxB_select_function) // return true if A(i,j) is kept ( GrB_Index i, // row index of A(i,j) GrB_Index j, // column index of A(i,j) const void *x, // value of A(i,j) const void *thunk // optional input for select function ) ; #undef GxB_SelectOp_new #undef GxM_SelectOp_new GB_PUBLIC GrB_Info GXB (SelectOp_new) // create a new user-defined select operator ( GxB_SelectOp *selectop, // handle for the new select operator GxB_select_function function,// pointer to the select function GrB_Type xtype, // type of input x, or NULL if type-generic GrB_Type ttype // type of thunk, or NULL if not used ) ; #define GxB_SelectOp_new(op,f,x,t) GB_SelectOp_new (op,f,x,t, GB_STR(f)) #define GxM_SelectOp_new(op,f,x,t) GM_SelectOp_new (op,f,x,t, GB_STR(f)) // GB_SelectOp_new should not be called directly, but only through the // GxB_SelectOp_new macro (but use GrB_IndexUnaryOp_new instead). GB_PUBLIC GrB_Info GB_SelectOp_new // not user-callable ( GxB_SelectOp *selectop, // handle for the new select operator GxB_select_function function,// pointer to the select function GrB_Type xtype, // type of input x GrB_Type ttype, // type of thunk, or NULL if not used const char *name // name of the underlying function ) ; // GxB_SelectOp_xtype is historical. Use a GrB_IndexUnaryOp instead. GB_PUBLIC GrB_Info GxB_SelectOp_xtype // return the type of x ( GrB_Type *xtype, // return type of input x GxB_SelectOp selectop // select operator ) ; // GxB_SelectOp_ttype is historical. Use a GrB_IndexUnaryOp instead. 
GB_PUBLIC GrB_Info GxB_SelectOp_ttype // return the type of thunk ( GrB_Type *ttype, // return type of input thunk GxB_SelectOp selectop // select operator ) ; GB_PUBLIC GrB_Info GxB_SelectOp_free // free a user-created select operator ( GxB_SelectOp *selectop // handle of select operator to free ) ; //============================================================================== // GrB_IndexUnaryOp: a unary operator that depends on the row/col indices //============================================================================== // The indexop has the form z = f(aij, i, j, y) where aij is the numerical // value of the A(i,j) entry, i and j are its row and column index, and y // is a scalar. For vectors, it has the form z = f(vi, i, 0, y). typedef struct GB_IndexUnaryOp_opaque *GrB_IndexUnaryOp ; typedef void (*GxB_index_unary_function) ( void *z, // output value z, of type ztype const void *x, // input value x of type xtype; value of v(i) or A(i,j) GrB_Index i, // row index of A(i,j) GrB_Index j, // column index of A(i,j), or zero for v(i) const void *y // input scalar y ) ; // GrB_IndexUnaryOp_new creates a user-defined unary op, with an automatic // detection of the operator name. 
#undef GrB_IndexUnaryOp_new #undef GrM_IndexUnaryOp_new GB_PUBLIC GrB_Info GRB (IndexUnaryOp_new) // create a new user-defined IndexUnary op ( GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator GxB_index_unary_function function, // pointer to IndexUnary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x (the A(i,j) entry) GrB_Type ytype // type of input y (the scalar) ) ; #define GrB_IndexUnaryOp_new(op,f,z,x,y) \ GxB_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL) #define GrM_IndexUnaryOp_new(op,f,z,x,y) \ GxM_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL) GB_PUBLIC GrB_Info GxB_IndexUnaryOp_new // create a named user-created IndexUnaryOp ( GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator GxB_index_unary_function function, // pointer to index_unary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype, // type of input y const char *idxop_name, // name of the user function const char *idxop_defn // definition of the user function ) ; GB_PUBLIC GrB_Info GxB_IndexUnaryOp_ztype_name // return the type_name of z ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_IndexUnaryOp op // IndexUnary operator ) ; // For TRIL, TRIU, DIAG, OFFDIAG, COLLE, COLGT, ROWLE, and ROWGT, // the xtype_name is an empty string (""), since these functions do not depend // on the type of the matrix input. 
GB_PUBLIC GrB_Info GxB_IndexUnaryOp_xtype_name  // return the type_name of x
(
    char *type_name,                // user array of size GxB_MAX_NAME_LEN
    const GrB_IndexUnaryOp op       // select operator
) ;

GB_PUBLIC GrB_Info GxB_IndexUnaryOp_ytype_name  // return the type_name of the scalar y
(
    char *type_name,                // user array of size GxB_MAX_NAME_LEN
    const GrB_IndexUnaryOp op       // select operator
) ;

GB_PUBLIC GrB_Info GrB_IndexUnaryOp_free        // free a user-created IndexUnaryOp
(
    GrB_IndexUnaryOp *op            // handle of IndexUnary to free
) ;

//------------------------------------------------------------------------------
// built-in IndexUnaryOps
//------------------------------------------------------------------------------

// To facilitate computations with negative integers, the indices i and j are
// of type int64_t.  The scalar y has the type corresponding to the suffix
// of the name of the operator.

GB_PUBLIC GrB_IndexUnaryOp

    //--------------------------------------------------------------------------
    // Result has the integer type INT32 or INT64, the same as the suffix
    //--------------------------------------------------------------------------

    // These operators work on any data type, including user-defined.

    // ROWINDEX: (i+y): row index plus y
    GrB_ROWINDEX_INT32, GrB_ROWINDEX_INT64,

    // COLINDEX: (j+y): col index plus y
    GrB_COLINDEX_INT32, GrB_COLINDEX_INT64,

    // DIAGINDEX: (j-(i+y)): diagonal index plus y
    GrB_DIAGINDEX_INT32, GrB_DIAGINDEX_INT64,

    //--------------------------------------------------------------------------
    // Result is bool, depending only on the indices i,j, and y
    //--------------------------------------------------------------------------

    // These operators work on any data type, including user-defined.
    // The scalar y is int64.
// TRIL: (j <= (i+y)): lower triangular part GrB_TRIL, // TRIU: (j >= (i+y)): upper triangular part GrB_TRIU, // DIAG: (j == (i+y)): diagonal GrB_DIAG, // OFFDIAG: (j != (i+y)): offdiagonal GrB_OFFDIAG, // COLLE: (j <= y): columns 0:y GrB_COLLE, // COLGT: (j > y): columns y+1:ncols-1 GrB_COLGT, // ROWLE: (i <= y): rows 0:y GrB_ROWLE, // ROWGT: (i > y): rows y+1:nrows-1 GrB_ROWGT, //-------------------------------------------------------------------------- // Result is bool, depending only on the value aij //-------------------------------------------------------------------------- // These operators work on matrices and vectors of any built-in type, // including complex types. aij and the scalar y have the same type as the // operator suffix. // VALUEEQ: (aij == y) GrB_VALUEEQ_INT8, GrB_VALUEEQ_UINT8, GrB_VALUEEQ_FP32, GrB_VALUEEQ_BOOL, GrB_VALUEEQ_INT16, GrB_VALUEEQ_UINT16, GrB_VALUEEQ_FP64, GrB_VALUEEQ_INT32, GrB_VALUEEQ_UINT32, GxB_VALUEEQ_FC32, GrB_VALUEEQ_INT64, GrB_VALUEEQ_UINT64, GxB_VALUEEQ_FC64, // VALUENE: (aij != y) GrB_VALUENE_INT8, GrB_VALUENE_UINT8, GrB_VALUENE_FP32, GrB_VALUENE_BOOL, GrB_VALUENE_INT16, GrB_VALUENE_UINT16, GrB_VALUENE_FP64, GrB_VALUENE_INT32, GrB_VALUENE_UINT32, GxB_VALUENE_FC32, GrB_VALUENE_INT64, GrB_VALUENE_UINT64, GxB_VALUENE_FC64, // These operators work on matrices and vectors of any real (non-complex) // built-in type. 
// VALUELT: (aij < y) GrB_VALUELT_INT8, GrB_VALUELT_UINT8, GrB_VALUELT_FP32, GrB_VALUELT_BOOL, GrB_VALUELT_INT16, GrB_VALUELT_UINT16, GrB_VALUELT_FP64, GrB_VALUELT_INT32, GrB_VALUELT_UINT32, GrB_VALUELT_INT64, GrB_VALUELT_UINT64, // VALUELE: (aij <= y) GrB_VALUELE_INT8, GrB_VALUELE_UINT8, GrB_VALUELE_FP32, GrB_VALUELE_BOOL, GrB_VALUELE_INT16, GrB_VALUELE_UINT16, GrB_VALUELE_FP64, GrB_VALUELE_INT32, GrB_VALUELE_UINT32, GrB_VALUELE_INT64, GrB_VALUELE_UINT64, // VALUEGT: (aij > y) GrB_VALUEGT_INT8, GrB_VALUEGT_UINT8, GrB_VALUEGT_FP32, GrB_VALUEGT_BOOL, GrB_VALUEGT_INT16, GrB_VALUEGT_UINT16, GrB_VALUEGT_FP64, GrB_VALUEGT_INT32, GrB_VALUEGT_UINT32, GrB_VALUEGT_INT64, GrB_VALUEGT_UINT64, // VALUEGE: (aij >= y) GrB_VALUEGE_INT8, GrB_VALUEGE_UINT8, GrB_VALUEGE_FP32, GrB_VALUEGE_BOOL, GrB_VALUEGE_INT16, GrB_VALUEGE_UINT16, GrB_VALUEGE_FP64, GrB_VALUEGE_INT32, GrB_VALUEGE_UINT32, GrB_VALUEGE_INT64, GrB_VALUEGE_UINT64 ; //============================================================================== // GrB_Monoid //============================================================================== // A monoid is an associative operator z=op(x,y) where all three types of z, x, // and y are identical. The monoid also has an identity element, such that // op(x,identity) = op(identity,x) = x. 
// GrB_Monoid is an opaque handle; its contents are managed by the library.
typedef struct GB_Monoid_opaque *GrB_Monoid ;

// GrB_Monoid_new_<type>: create a new monoid from a binary operator and an
// identity value of the given <type>.  All variants take the same arguments:
//      GrB_Monoid *monoid      handle of monoid to create
//      GrB_BinaryOp op         binary operator of the monoid
//      <type> identity         identity value of the monoid

GB_PUBLIC GrB_Info GrB_Monoid_new_BOOL   (GrB_Monoid *monoid, GrB_BinaryOp op, bool       identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_INT8   (GrB_Monoid *monoid, GrB_BinaryOp op, int8_t     identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_UINT8  (GrB_Monoid *monoid, GrB_BinaryOp op, uint8_t    identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_INT16  (GrB_Monoid *monoid, GrB_BinaryOp op, int16_t    identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_UINT16 (GrB_Monoid *monoid, GrB_BinaryOp op, uint16_t   identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_INT32  (GrB_Monoid *monoid, GrB_BinaryOp op, int32_t    identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_UINT32 (GrB_Monoid *monoid, GrB_BinaryOp op, uint32_t   identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_INT64  (GrB_Monoid *monoid, GrB_BinaryOp op, int64_t    identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_UINT64 (GrB_Monoid *monoid, GrB_BinaryOp op, uint64_t   identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_FP32   (GrB_Monoid *monoid, GrB_BinaryOp op, float      identity) ;
GB_PUBLIC GrB_Info GrB_Monoid_new_FP64   (GrB_Monoid *monoid, GrB_BinaryOp op, double     identity) ;
GB_PUBLIC GrB_Info GxB_Monoid_new_FC32   (GrB_Monoid *monoid, GrB_BinaryOp op, GxB_FC32_t identity) ;
GB_PUBLIC GrB_Info GxB_Monoid_new_FC64   (GrB_Monoid *monoid, GrB_BinaryOp op, GxB_FC64_t identity) ;

// Create a monoid with a user-defined type; identity is passed by (void *).
GB_PUBLIC GrB_Info GrB_Monoid_new_UDT    (GrB_Monoid *monoid, GrB_BinaryOp op, void      *identity) ;

// Type-generic method for creating a new monoid:

/*
GB_PUBLIC
GrB_Info GrB_Monoid_new             // create a monoid
(
    GrB_Monoid *monoid,             // handle of monoid to create
    GrB_BinaryOp op,                // binary operator of the monoid
    <type> identity                 // identity value of the monoid
) ;
*/

#if GxB_STDC_VERSION >= 201112L
// Dispatch on the C type of the identity argument (C11 _Generic).
#define GrB_Monoid_new(monoid,op,identity)      \
    _Generic                                    \
    (                                           \
        (identity),                             \
        GB_CASES (, GrB, Monoid_new)            \
    )                                           \
    (monoid, op, identity)
#endif

// GxB_Monoid_terminal_new is identical to GrB_Monoid_new, except that a
// terminal value can be specified.  The terminal may be NULL, which indicates
// no terminal value (and in this case, it is identical to GrB_Monoid_new).
// The terminal value, if not NULL, must have the same type as the identity.
// GxB_Monoid_terminal_new_<type>: same as GrB_Monoid_new_<type>, with one
// additional argument, a terminal value of the same <type>:
//      GrB_Monoid *monoid      handle of monoid to create
//      GrB_BinaryOp op         binary operator of the monoid
//      <type> identity         identity value of the monoid
//      <type> terminal         terminal value of the monoid

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_BOOL   (GrB_Monoid *monoid, GrB_BinaryOp op, bool       identity, bool       terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT8   (GrB_Monoid *monoid, GrB_BinaryOp op, int8_t     identity, int8_t     terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT8  (GrB_Monoid *monoid, GrB_BinaryOp op, uint8_t    identity, uint8_t    terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT16  (GrB_Monoid *monoid, GrB_BinaryOp op, int16_t    identity, int16_t    terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT16 (GrB_Monoid *monoid, GrB_BinaryOp op, uint16_t   identity, uint16_t   terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT32  (GrB_Monoid *monoid, GrB_BinaryOp op, int32_t    identity, int32_t    terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT32 (GrB_Monoid *monoid, GrB_BinaryOp op, uint32_t   identity, uint32_t   terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT64  (GrB_Monoid *monoid, GrB_BinaryOp op, int64_t    identity, int64_t    terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT64 (GrB_Monoid *monoid, GrB_BinaryOp op, uint64_t   identity, uint64_t   terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FP32   (GrB_Monoid *monoid, GrB_BinaryOp op, float      identity, float      terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FP64   (GrB_Monoid *monoid, GrB_BinaryOp op, double     identity, double     terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FC32   (GrB_Monoid *monoid, GrB_BinaryOp op, GxB_FC32_t identity, GxB_FC32_t terminal) ;
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FC64   (GrB_Monoid *monoid, GrB_BinaryOp op, GxB_FC64_t identity, GxB_FC64_t terminal) ;

// Create a monoid with a user-defined type; identity and terminal are passed
// by (void *).
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UDT    (GrB_Monoid *monoid, GrB_BinaryOp op, void      *identity, void      *terminal) ;

// Type-generic method for creating a new monoid with a terminal value:

/*
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new    // create a monoid
(
    GrB_Monoid *monoid,             // handle of monoid to create
    GrB_BinaryOp op,                // binary operator of the monoid
    <type> identity,                // identity value of the monoid
    <type> terminal                 // terminal value of the monoid
) ;
*/

#if GxB_STDC_VERSION >= 201112L
// Dispatch on the C type of the identity argument (C11 _Generic).
#define GxB_Monoid_terminal_new(monoid,op,identity,terminal)    \
    _Generic                                                    \
    (                                                           \
        (identity),                                             \
        GB_CASES (, GxB, Monoid_terminal_new)                   \
    )                                                           \
    (monoid, op, identity, terminal)
#endif

GB_PUBLIC GrB_Info GxB_Monoid_operator      // return the monoid operator
(
    GrB_BinaryOp *op,               // returns the binary op of the monoid
    GrB_Monoid monoid               // monoid to query
) ;

GB_PUBLIC GrB_Info GxB_Monoid_identity      // return the monoid identity
(
    void *identity,                 // returns the identity of the monoid
    GrB_Monoid monoid               // monoid to query
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal      // return the monoid terminal
(
    bool *has_terminal,             // true if the monoid has a terminal value
    void *terminal,                 // returns the terminal of the monoid,
                                    // unmodified if has_terminal is false
    GrB_Monoid monoid               // monoid to query
) ;

GB_PUBLIC GrB_Info GrB_Monoid_free          // free a user-created monoid
(
    GrB_Monoid *monoid              // handle of monoid to free
) ;

//==============================================================================
// GrB_Semiring
//==============================================================================

// GrB_Semiring is an opaque handle; its contents are managed by the library.
typedef struct GB_Semiring_opaque *GrB_Semiring ;

GB_PUBLIC GrB_Info GrB_Semiring_new         // create a semiring
(
    GrB_Semiring *semiring,         // handle of semiring to create
    GrB_Monoid add,                 // add monoid of the semiring
    GrB_BinaryOp multiply           // multiply operator of the semiring
) ;

GB_PUBLIC GrB_Info GxB_Semiring_add         // return the add monoid of a semiring
(
    GrB_Monoid *add,                // returns add monoid of the semiring
    GrB_Semiring semiring           // semiring to query
) ;

GB_PUBLIC GrB_Info GxB_Semiring_multiply    // return multiply operator of a semiring
(
    GrB_BinaryOp *multiply,         // returns multiply operator of the semiring
    GrB_Semiring semiring           // semiring to query
) ;

GB_PUBLIC GrB_Info GrB_Semiring_free        // free a user-created semiring
(
    GrB_Semiring *semiring          // handle of semiring to free
) ;

//==============================================================================
// GrB_Scalar: a GraphBLAS scalar
//==============================================================================

// GxB_Scalar has become GrB_Scalar.  The older name GxB_Scalar is kept as
// historical, but GrB_Scalar should be used instead.

typedef struct GB_Scalar_opaque *GxB_Scalar ;   // historical: use GrB_Scalar
typedef struct GB_Scalar_opaque *GrB_Scalar ;   // use this instead

// These methods create, free, copy, and clear a GrB_Scalar.  The nvals,
// and type methods return basic information about a GrB_Scalar.

GB_PUBLIC GrB_Info GrB_Scalar_new       // create a new GrB_Scalar with no entry
(
    GrB_Scalar *s,                  // handle of GrB_Scalar to create
    GrB_Type type                   // type of GrB_Scalar to create
) ;

GB_PUBLIC GrB_Info GrB_Scalar_dup       // make an exact copy of a GrB_Scalar
(
    GrB_Scalar *s,                  // handle of output GrB_Scalar to create
    const GrB_Scalar t              // input GrB_Scalar to copy
) ;

GB_PUBLIC GrB_Info GrB_Scalar_clear     // clear a GrB_Scalar of its entry
(                                       // type remains unchanged.
    GrB_Scalar s                    // GrB_Scalar to clear
) ;

GB_PUBLIC GrB_Info GrB_Scalar_nvals     // get the number of entries in a GrB_Scalar
(
    GrB_Index *nvals,               // GrB_Scalar has nvals entries (0 or 1)
    const GrB_Scalar s              // GrB_Scalar to query
) ;

// NOTE: GxB_Scalar_type is historical.  Use GxB_Scalar_type_name instead.
GB_PUBLIC GrB_Info GxB_Scalar_type      // get the type of a GrB_Scalar
(
    GrB_Type *type,                 // returns the type of the GrB_Scalar
    const GrB_Scalar s              // GrB_Scalar to query
) ;

GB_PUBLIC GrB_Info GxB_Scalar_type_name // return the name of the type of a scalar
(
    char *type_name,                // name of the type (char array of size at
                                    // least GxB_MAX_NAME_LEN, owned by the
                                    // user application).
    const GrB_Scalar s              // GrB_Scalar to query
) ;

GB_PUBLIC GrB_Info GxB_Scalar_memoryUsage  // return # of bytes used for a scalar
(
    size_t *size,                   // # of bytes used by the scalar s
    const GrB_Scalar s              // GrB_Scalar to query
) ;

GB_PUBLIC GrB_Info GrB_Scalar_free      // free a GrB_Scalar
(
    GrB_Scalar *s                   // handle of GrB_Scalar to free
) ;

// historical names identical to the GrB_Scalar methods above:
GB_PUBLIC GrB_Info GxB_Scalar_new   (GrB_Scalar *s, GrB_Type type) ;
GB_PUBLIC GrB_Info GxB_Scalar_dup   (GrB_Scalar *s, const GrB_Scalar t) ;
GB_PUBLIC GrB_Info GxB_Scalar_clear (GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_nvals (GrB_Index *nvals, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_free  (GrB_Scalar *s) ;

//------------------------------------------------------------------------------
// GrB_Scalar_setElement
//------------------------------------------------------------------------------

// Set a single GrB_Scalar s, from a user scalar x: s = x, typecasting from the
// type of x to the type of w as needed.
// GrB_Scalar_setElement_<type>: s = x.  All variants take the GrB_Scalar to
// modify and the user scalar x to assign to it.

GB_PUBLIC GrB_Info GrB_Scalar_setElement_BOOL   (GrB_Scalar s, bool       x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT8   (GrB_Scalar s, int8_t     x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT8  (GrB_Scalar s, uint8_t    x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT16  (GrB_Scalar s, int16_t    x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT16 (GrB_Scalar s, uint16_t   x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT32  (GrB_Scalar s, int32_t    x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT32 (GrB_Scalar s, uint32_t   x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT64  (GrB_Scalar s, int64_t    x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT64 (GrB_Scalar s, uint64_t   x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_FP32   (GrB_Scalar s, float      x) ;
GB_PUBLIC GrB_Info GrB_Scalar_setElement_FP64   (GrB_Scalar s, double     x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FC32   (GrB_Scalar s, GxB_FC32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FC64   (GrB_Scalar s, GxB_FC64_t x) ;

// User-defined type: x is passed by (void *).
GB_PUBLIC GrB_Info GrB_Scalar_setElement_UDT    (GrB_Scalar s, void      *x) ;

// historical names identical to the GrB_Scalar_setElement methods above:
GB_PUBLIC GrB_Info GxB_Scalar_setElement_BOOL   (GrB_Scalar s, bool x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT8   (GrB_Scalar s, int8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT16  (GrB_Scalar s, int16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT32  (GrB_Scalar s, int32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT64  (GrB_Scalar s, int64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT8  (GrB_Scalar s, uint8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT16 (GrB_Scalar s, uint16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT32 (GrB_Scalar s, uint32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT64 (GrB_Scalar s, uint64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP32   (GrB_Scalar s, float x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP64   (GrB_Scalar s, double x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UDT    (GrB_Scalar s, void *x) ;

// Type-generic version: x can be any supported C type or void * for a
// user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Scalar_setElement      // s = x
(
    GrB_Scalar s,                   // GrB_Scalar to modify
    <type> x                        // user scalar to assign to s
) ;
*/

#if GxB_STDC_VERSION >= 201112L
// Dispatch on the C type of x (C11 _Generic).
#define GrB_Scalar_setElement(s,x)                  \
    _Generic                                        \
    (                                               \
        (x),                                        \
        GB_CASES (, GrB, Scalar_setElement)         \
    )                                               \
    (s, x)
#define GxB_Scalar_setElement(s,x) GrB_Scalar_setElement (s, x)
#endif

//------------------------------------------------------------------------------
// GrB_Scalar_extractElement
//------------------------------------------------------------------------------

// Extract a single entry from a GrB_Scalar, x = s, typecasting from the type
// of s to the type of x as needed.
// GrB_Scalar_extractElement_<type>: x = s.  All variants take a pointer to the
// user scalar x and the GrB_Scalar to extract an entry from.

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_BOOL   (bool       *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT8   (int8_t     *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT8  (uint8_t    *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT16  (int16_t    *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT16 (uint16_t   *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT32  (int32_t    *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT32 (uint32_t   *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT64  (int64_t    *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT64 (uint64_t   *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_FP32   (float      *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_FP64   (double     *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FC32   (GxB_FC32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FC64   (GxB_FC64_t *x, const GrB_Scalar s) ;

// User-defined type: x is passed by (void *).
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UDT    (void       *x, const GrB_Scalar s) ;

// historical names identical to the GrB_Scalar_extractElement methods above:
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_BOOL   (bool *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT8   (int8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT16  (int16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT32  (int32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT64  (int64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT8  (uint8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT16 (uint16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT32 (uint32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT64 (uint64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP32   (float *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP64   (double *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UDT    (void *x, const GrB_Scalar s) ;

// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement  // x = s
(
    <type> *x,                      // user scalar extracted
    const GrB_Scalar s              // GrB_Scalar to extract an entry from
) ;
*/

#if GxB_STDC_VERSION >= 201112L
// Dispatch on the C type of the pointer x (C11 _Generic).
#define GrB_Scalar_extractElement(x,s)              \
    _Generic                                        \
    (                                               \
        (x),                                        \
        GB_CASES (*, GrB, Scalar_extractElement)    \
    )                                               \
    (x, s)
#define GxB_Scalar_extractElement(x,s) GrB_Scalar_extractElement (x, s)
#endif

//==============================================================================
// GrB_Vector: a GraphBLAS vector
//==============================================================================

// GrB_Vector is an opaque handle; its contents are managed by the library.
typedef struct GB_Vector_opaque *GrB_Vector ;

// These methods create, free, copy, and clear a vector.  The size, nvals,
// and type methods return basic information about a vector.

GB_PUBLIC GrB_Info GrB_Vector_new       // create a new vector with no entries
(
    GrB_Vector *v,                  // handle of vector to create
    GrB_Type type,                  // type of vector to create
    GrB_Index n                     // vector dimension is n-by-1
                                    // (n must be <= GrB_INDEX_MAX+1)
) ;

GB_PUBLIC GrB_Info GrB_Vector_dup       // make an exact copy of a vector
(
    GrB_Vector *w,                  // handle of output vector to create
    const GrB_Vector u              // input vector to copy
) ;

GB_PUBLIC GrB_Info GrB_Vector_clear     // clear a vector of all entries;
(                                       // type and dimension remain unchanged.
    GrB_Vector v                    // vector to clear
) ;

GB_PUBLIC GrB_Info GrB_Vector_size      // get the dimension of a vector
(
    GrB_Index *n,                   // vector dimension is n-by-1
    const GrB_Vector v              // vector to query
) ;

GB_PUBLIC GrB_Info GrB_Vector_nvals     // get the number of entries in a vector
(
    GrB_Index *nvals,               // vector has nvals entries
    const GrB_Vector v              // vector to query
) ;

// NOTE: GxB_Vector_type is historical.  Use GxB_Vector_type_name instead.
GB_PUBLIC GrB_Info GxB_Vector_type      // get the type of a vector
(
    GrB_Type *type,                 // returns the type of the vector
    const GrB_Vector v              // vector to query
) ;

GB_PUBLIC GrB_Info GxB_Vector_type_name // return the name of the type of a vector
(
    char *type_name,                // name of the type (char array of size at
                                    // least GxB_MAX_NAME_LEN, owned by the
                                    // user application).
    const GrB_Vector v              // vector to query
) ;

GB_PUBLIC GrB_Info GxB_Vector_memoryUsage  // return # of bytes used for a vector
(
    size_t *size,                   // # of bytes used by the vector v
    const GrB_Vector v              // vector to query
) ;

GB_PUBLIC GrB_Info GxB_Vector_iso       // return iso status of a vector
(
    bool *iso,                      // true if the vector is iso-valued
    const GrB_Vector v              // vector to query
) ;

GB_PUBLIC GrB_Info GrB_Vector_free      // free a vector
(
    GrB_Vector *v                   // handle of vector to free
) ;

//------------------------------------------------------------------------------
// GrB_Vector_build
//------------------------------------------------------------------------------

// GrB_Vector_build: w = sparse (I,1,X), but using any
// associative operator to assemble duplicate entries.
// GrB_Vector_build_<type>: build a vector from (I,X) tuples.  All variants
// take the same arguments:
//      GrB_Vector w            vector to build
//      const GrB_Index *I      array of row indices of tuples
//      const <type> *X         array of values of tuples
//      GrB_Index nvals         number of tuples
//      const GrB_BinaryOp dup  binary function to assemble duplicates

GB_PUBLIC GrB_Info GrB_Vector_build_BOOL   (GrB_Vector w, const GrB_Index *I, const bool       *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_INT8   (GrB_Vector w, const GrB_Index *I, const int8_t     *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_UINT8  (GrB_Vector w, const GrB_Index *I, const uint8_t    *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_INT16  (GrB_Vector w, const GrB_Index *I, const int16_t    *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_UINT16 (GrB_Vector w, const GrB_Index *I, const uint16_t   *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_INT32  (GrB_Vector w, const GrB_Index *I, const int32_t    *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_UINT32 (GrB_Vector w, const GrB_Index *I, const uint32_t   *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_INT64  (GrB_Vector w, const GrB_Index *I, const int64_t    *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_UINT64 (GrB_Vector w, const GrB_Index *I, const uint64_t   *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_FP32   (GrB_Vector w, const GrB_Index *I, const float      *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GrB_Vector_build_FP64   (GrB_Vector w, const GrB_Index *I, const double     *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GxB_Vector_build_FC32   (GrB_Vector w, const GrB_Index *I, const GxB_FC32_t *X, GrB_Index nvals, const GrB_BinaryOp dup) ;
GB_PUBLIC GrB_Info GxB_Vector_build_FC64   (GrB_Vector w, const GrB_Index *I, const GxB_FC64_t *X, GrB_Index nvals, const GrB_BinaryOp dup) ;

// User-defined type: X is passed by (const void *).
GB_PUBLIC GrB_Info GrB_Vector_build_UDT    (GrB_Vector w, const GrB_Index *I, const void       *X, GrB_Index nvals, const GrB_BinaryOp dup) ;

GB_PUBLIC GrB_Info GxB_Vector_build_Scalar  // build a vector from (i,scalar) tuples
(
    GrB_Vector w,                   // vector to build
    const GrB_Index *I,             // array of row indices of tuples
    GrB_Scalar scalar,              // value for all tuples
    GrB_Index nvals                 // number of tuples
) ;

// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Vector_build           // build a vector from (I,X) tuples
(
    GrB_Vector w,                   // vector to build
    const GrB_Index *I,             // array of row indices of tuples
    const <type> *X,                // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;
*/

#if GxB_STDC_VERSION >= 201112L
// Dispatch on the C type of the pointer X (C11 _Generic).
#define GrB_Vector_build(w,I,X,nvals,dup)           \
    _Generic                                        \
    (                                               \
        (X),                                        \
        GB_CASES (*, GrB, Vector_build)             \
    )                                               \
    (w, I, ((const void *) (X)), nvals, dup)
#endif

//------------------------------------------------------------------------------
// GrB_Vector_setElement
//------------------------------------------------------------------------------

// Set a single scalar in a vector, w(i) = x, typecasting from the type of x to
// the type of w as needed.
// GrB_Vector_setElement_<type>: w(i) = x.  All variants take the vector to
// modify, the scalar x to assign to w(i), and the row index i.

GB_PUBLIC GrB_Info GrB_Vector_setElement_BOOL   (GrB_Vector w, bool       x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_INT8   (GrB_Vector w, int8_t     x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT8  (GrB_Vector w, uint8_t    x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_INT16  (GrB_Vector w, int16_t    x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT16 (GrB_Vector w, uint16_t   x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_INT32  (GrB_Vector w, int32_t    x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT32 (GrB_Vector w, uint32_t   x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_INT64  (GrB_Vector w, int64_t    x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT64 (GrB_Vector w, uint64_t   x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_FP32   (GrB_Vector w, float      x, GrB_Index i) ;
GB_PUBLIC GrB_Info GrB_Vector_setElement_FP64   (GrB_Vector w, double     x, GrB_Index i) ;
GB_PUBLIC GrB_Info GxB_Vector_setElement_FC32   (GrB_Vector w, GxB_FC32_t x, GrB_Index i) ;
GB_PUBLIC GrB_Info GxB_Vector_setElement_FC64   (GrB_Vector w, GxB_FC64_t x, GrB_Index i) ;

// User-defined type: x is passed by (void *).
GB_PUBLIC GrB_Info GrB_Vector_setElement_UDT    (GrB_Vector w, void      *x, GrB_Index i) ;

// x given as a GrB_Scalar:
GB_PUBLIC GrB_Info GrB_Vector_setElement_Scalar (GrB_Vector w, GrB_Scalar x, GrB_Index i) ;

// Type-generic version: x can be any supported C type or void * for a
// user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Vector_setElement      // w(i) = x
(
    GrB_Vector w,                   // vector to modify
    <type> x,                       // scalar to assign to w(i)
    GrB_Index i                     // row index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
// Dispatch on the C type of x; a GrB_Scalar x falls through to the default.
#define GrB_Vector_setElement(w,x,i)                \
    _Generic                                        \
    (                                               \
        (x),                                        \
        GB_CASES (, GrB, Vector_setElement),        \
        default: GrB_Vector_setElement_Scalar       \
    )                                               \
    (w, x, i)
#endif

//------------------------------------------------------------------------------
// GrB_Vector_extractElement
//------------------------------------------------------------------------------

// Extract a single entry from a vector, x = v(i), typecasting from the type of
// v to the type of x as needed.
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_BOOL     // x = v(i)
(
    bool *x,                        // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT8     // x = v(i)
(
    int8_t *x,                      // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT8    // x = v(i)
(
    uint8_t *x,                     // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT16    // x = v(i)
(
    int16_t *x,                     // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT16   // x = v(i)
(
    uint16_t *x,                    // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT32    // x = v(i)
(
    int32_t *x,                     // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT32   // x = v(i)
(
    uint32_t *x,                    // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT64    // x = v(i)
(
    int64_t *x,                     // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT64   // x = v(i)
(
    uint64_t *x,                    // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP32     // x = v(i)
(
    float *x,                       // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP64     // x = v(i)
(
    double *x,                      // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC32     // x = v(i)
(
    GxB_FC32_t *x,                  // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC64     // x = v(i)
(
    GxB_FC64_t *x,                  // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UDT      // x = v(i)
(
    void *x,                        // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractElement_Scalar   // x = v(i)
(
    GrB_Scalar x,                   // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;

// Type-generic version:  x can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Vector_extractElement  // x = v(i)
(
    <type> *x,                      // scalar extracted
    const GrB_Vector v,             // vector to extract an entry from
    GrB_Index i                     // row index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Vector_extractElement(x,v,i)                \
    _Generic                                            \
    (                                                   \
        (x),                                            \
        GB_CASES (*, GrB, Vector_extractElement),       \
        default: GrB_Vector_extractElement_Scalar       \
    )                                                   \
    (x, v, i)
#endif

//------------------------------------------------------------------------------
// GrB_Vector_removeElement
//------------------------------------------------------------------------------

// GrB_Vector_removeElement (v,i) removes the element v(i) from the vector v.

GB_PUBLIC
GrB_Info GrB_Vector_removeElement
(
    GrB_Vector v,                   // vector to remove an element from
    GrB_Index i                     // index
) ;

//------------------------------------------------------------------------------
// GrB_Vector_extractTuples
//------------------------------------------------------------------------------

// Extracts all tuples from a vector, like [I,~,X] = find (v).  If
// any parameter I and/or X is NULL, then that component is not extracted.
// For example, to extract just the row indices, pass I as non-NULL, and X as
// NULL.  This is like [I,~,~] = find (v).

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_BOOL      // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    bool *X,                // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT8      // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int8_t *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT8     // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint8_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT16     // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int16_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT16    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint16_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT32     // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int32_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT32    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint32_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT64     // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int64_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT64    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint64_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP32      // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    float *X,               // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP64      // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    double *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC32      // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GxB_FC32_t *X,          // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC64      // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GxB_FC64_t *X,          // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UDT       // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    void *X,                // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

// Type-generic version:  X can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples           // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    <type> *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Vector_extractTuples(I,X,nvals,v)           \
    _Generic                                            \
    (                                                   \
        (X),                                            \
        GB_CASES (*, GrB, Vector_extractTuples)         \
    )                                                   \
    (I, X, nvals, v)
#endif

//==============================================================================
// GrB_Matrix: a GraphBLAS matrix
//==============================================================================

typedef struct GB_Matrix_opaque *GrB_Matrix ;

// These methods create, free, copy, and clear a matrix.  The nrows, ncols,
// nvals, and type methods return basic information about a matrix.
GB_PUBLIC
GrB_Info GrB_Matrix_new     // create a new matrix with no entries
(
    GrB_Matrix *A,          // handle of matrix to create
    GrB_Type type,          // type of matrix to create
    GrB_Index nrows,        // matrix dimension is nrows-by-ncols
    GrB_Index ncols         // (nrows and ncols must be <= GrB_INDEX_MAX+1)
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_dup     // make an exact copy of a matrix
(
    GrB_Matrix *C,          // handle of output matrix to create
    const GrB_Matrix A      // input matrix to copy
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_clear   // clear a matrix of all entries;
(                           // type and dimensions remain unchanged
    GrB_Matrix A            // matrix to clear
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_nrows   // get the number of rows of a matrix
(
    GrB_Index *nrows,       // matrix has nrows rows
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_ncols   // get the number of columns of a matrix
(
    GrB_Index *ncols,       // matrix has ncols columns
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_nvals   // get the number of entries in a matrix
(
    GrB_Index *nvals,       // matrix has nvals entries
    const GrB_Matrix A      // matrix to query
) ;

// NOTE: GxB_Matrix_type is historical.  Use GxB_Matrix_type_name instead.

GB_PUBLIC
GrB_Info GxB_Matrix_type    // get the type of a matrix
(
    GrB_Type *type,         // returns the type of the matrix
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_type_name       // return the name of the type of a matrix
(
    char *type_name,        // name of the type (char array of size at least
                            // GxB_MAX_NAME_LEN, owned by the user application).
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_memoryUsage     // return # of bytes used for a matrix
(
    size_t *size,           // # of bytes used by the matrix A
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_iso     // return iso status of a matrix
(
    bool *iso,              // true if the matrix is iso-valued
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_free    // free a matrix
(
    GrB_Matrix *A           // handle of matrix to free
) ;

//------------------------------------------------------------------------------
// GrB_Matrix_build
//------------------------------------------------------------------------------

// GrB_Matrix_build:  C = sparse (I,J,X), but using any
// associative operator to assemble duplicate entries.

GB_PUBLIC
GrB_Info GrB_Matrix_build_BOOL      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const bool *X,                  // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT8      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const int8_t *X,                // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT8     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const uint8_t *X,               // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT16     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const int16_t *X,               // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT16    // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const uint16_t *X,              // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT32     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const int32_t *X,               // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT32    // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const uint32_t *X,              // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT64     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const int64_t *X,               // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT64    // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const uint64_t *X,              // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_FP32      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const float *X,                 // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_FP64      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const double *X,                // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_build_FC32      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const GxB_FC32_t *X,            // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_build_FC64      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const GxB_FC64_t *X,            // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UDT       // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const void *X,                  // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_build_Scalar    // build a matrix from (I,J,scalar) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    GrB_Scalar scalar,              // value for all tuples
    GrB_Index nvals                 // number of tuples
) ;

// Type-generic version:  X can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Matrix_build           // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,                   // matrix to build
    const GrB_Index *I,             // array of row indices of tuples
    const GrB_Index *J,             // array of column indices of tuples
    const <type> *X,                // array of values of tuples
    GrB_Index nvals,                // number of tuples
    const GrB_BinaryOp dup          // binary function to assemble duplicates
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_build(C,I,J,X,nvals,dup)             \
    _Generic                                            \
    (                                                   \
        (X),                                            \
        GB_CASES (*, GrB, Matrix_build)                 \
    )                                                   \
    (C, I, J, ((const void *) (X)), nvals, dup)
#endif

//------------------------------------------------------------------------------
// GrB_Matrix_setElement
//------------------------------------------------------------------------------

// Set a single entry in a matrix, C(i,j) = x, typecasting
// from the type of x to the type of C, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_BOOL     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    bool x,                             // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT8     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int8_t x,                           // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT8    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint8_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT16    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int16_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT16   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint16_t x,                         // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT32    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int32_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT32   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint32_t x,                         // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT64    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int64_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT64   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint64_t x,                         // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP32     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    float x,                            // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP64     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    double x,                           // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC32     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    GxB_FC32_t x,                       // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC64     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    GxB_FC64_t x,                       // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UDT      // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    void *x,                            // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_Scalar   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    GrB_Scalar x,                       // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

// Type-generic version:  x can be any supported C type or void * for a
// user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Matrix_setElement          // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    <type> x,                           // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_setElement(C,x,i,j)                  \
    _Generic                                            \
    (                                                   \
        (x),                                            \
        GB_CASES (, GrB, Matrix_setElement),            \
        default: GrB_Matrix_setElement_Scalar           \
    )                                                   \
    (C, x, i, j)
#endif

//------------------------------------------------------------------------------
// GrB_Matrix_extractElement
//------------------------------------------------------------------------------

// Extract a single entry from a matrix, x = A(i,j), typecasting from the type
// of A to the type of x, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_BOOL     // x = A(i,j)
(
    bool *x,                            // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT8     // x = A(i,j)
(
    int8_t *x,                          // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT8    // x = A(i,j)
(
    uint8_t *x,                         // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT16    // x = A(i,j)
(
    int16_t *x,                         // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT16   // x = A(i,j)
(
    uint16_t *x,                        // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT32    // x = A(i,j)
(
    int32_t *x,                         // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT32   // x = A(i,j)
(
    uint32_t *x,                        // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT64    // x = A(i,j)
(
    int64_t *x,                         // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT64   // x = A(i,j)
(
    uint64_t *x,                        // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP32     // x = A(i,j)
(
    float *x,                           // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP64     // x = A(i,j)
(
    double *x,                          // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC32     // x = A(i,j)
(
    GxB_FC32_t *x,                      // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC64     // x = A(i,j)
(
    GxB_FC64_t *x,                      // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UDT      // x = A(i,j)
(
    void *x,                            // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_Scalar   // x = A(i,j)
(
    GrB_Scalar x,                       // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

// Type-generic version:  x can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement      // x = A(i,j)
(
    <type> *x,                          // extracted scalar
    const GrB_Matrix A,                 // matrix to extract a scalar from
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_extractElement(x,A,i,j)              \
    _Generic                                            \
    (                                                   \
        (x),                                            \
        GB_CASES (*, GrB, Matrix_extractElement),       \
        default: GrB_Matrix_extractElement_Scalar       \
    )                                                   \
    (x, A, i, j)
#endif

//------------------------------------------------------------------------------
// GrB_Matrix_removeElement
//------------------------------------------------------------------------------

// GrB_Matrix_removeElement (A,i,j) removes the entry A(i,j) from the matrix A.
GB_PUBLIC
GrB_Info GrB_Matrix_removeElement
(
    GrB_Matrix C,                   // matrix to remove entry from
    GrB_Index i,                    // row index
    GrB_Index j                     // column index
) ;

//------------------------------------------------------------------------------
// GrB_Matrix_extractTuples
//------------------------------------------------------------------------------

// Extracts all tuples from a matrix, like [I,J,X] = find (A).  If
// any parameter I, J and/or X is NULL, then that component is not extracted.
// For example, to extract just the row and col indices, pass I and J as
// non-NULL, and X as NULL.  This is like [I,J,~] = find (A).

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_BOOL      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    bool *X,                // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT8      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int8_t *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT8     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint8_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT16     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int16_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT16    // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint16_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT32     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int32_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT32    // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint32_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT64     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int64_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT64    // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint64_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP32      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples float *X, // array for returning values of tuples GrB_Index *nvals, // I,J,X size on input; # tuples on output const GrB_Matrix A // matrix to extract tuples from ) ; GB_PUBLIC GrB_Info GrB_Matrix_extractTuples_FP64 // [I,J,X] = find (A) ( GrB_Index *I, // array for returning row indices of tuples GrB_Index *J, // array for returning col indices of tuples double *X, // array for returning values of tuples GrB_Index *nvals, // I,J,X size on input; # tuples on output const GrB_Matrix A // matrix to extract tuples from ) ; GB_PUBLIC GrB_Info GxB_Matrix_extractTuples_FC32 // [I,J,X] = find (A) ( GrB_Index *I, // array for returning row indices of tuples GrB_Index *J, // array for returning col indices of tuples GxB_FC32_t *X, // array for returning values of tuples GrB_Index *nvals, // I,J,X size on input; # tuples on output const GrB_Matrix A // matrix to extract tuples from ) ; GB_PUBLIC GrB_Info GxB_Matrix_extractTuples_FC64 // [I,J,X] = find (A) ( GrB_Index *I, // array for returning row indices of tuples GrB_Index *J, // array for returning col indices of tuples GxB_FC64_t *X, // array for returning values of tuples GrB_Index *nvals, // I,J,X size on input; # tuples on output const GrB_Matrix A // matrix to extract tuples from ) ; GB_PUBLIC GrB_Info GrB_Matrix_extractTuples_UDT // [I,J,X] = find (A) ( GrB_Index *I, // array for returning row indices of tuples GrB_Index *J, // array for returning col indices of tuples void *X, // array for returning values of tuples GrB_Index *nvals, // I,J,X size on input; # tuples on output const GrB_Matrix A // matrix to extract tuples from ) ; // Type-generic version: X can be a pointer to any supported C type or void * // for a user-defined type. 
/* GB_PUBLIC GrB_Info GrB_Matrix_extractTuples // [I,J,X] = find (A) ( GrB_Index *I, // array for returning row indices of tuples GrB_Index *J, // array for returning col indices of tuples <type> *X, // array for returning values of tuples GrB_Index *nvals, // I,J,X size on input; # tuples on output const GrB_Matrix A // matrix to extract tuples from ) ; */ #if GxB_STDC_VERSION >= 201112L #define GrB_Matrix_extractTuples(I,J,X,nvals,A) \ _Generic \ ( \ (X), \ GB_CASES (*, GrB, Matrix_extractTuples) \ ) \ (I, J, X, nvals, A) #endif //------------------------------------------------------------------------------ // GxB_Matrix_concat and GxB_Matrix_split //------------------------------------------------------------------------------ // GxB_Matrix_concat concatenates an array of matrices (Tiles) into a single // GrB_Matrix C. // Tiles is an m-by-n dense array of matrices held in row-major format, where // Tiles [i*n+j] is the (i,j)th tile, and where m > 0 and n > 0 must hold. Let // A{i,j} denote the (i,j)th tile. The matrix C is constructed by // concatenating these tiles together, as: // C = [ A{0,0} A{0,1} A{0,2} ... A{0,n-1} // A{1,0} A{1,1} A{1,2} ... A{1,n-1} // ... // A{m-1,0} A{m-1,1} A{m-1,2} ... A{m-1,n-1} ] // On input, the matrix C must already exist. Any existing entries in C are // discarded. C must have dimensions nrows by ncols where nrows is the sum of // # of rows in the matrices A{i,0} for all i, and ncols is the sum of the # of // columns in the matrices A{0,j} for all j. All matrices in any given tile // row i must have the same number of rows (that is, nrows(A{i,0}) must equal // nrows(A{i,j}) for all j), and all matrices in any given tile column j must // have the same number of columns (that is, ncols(A{0,j}) must equal // ncols(A{i,j}) for all i). // The type of C is unchanged, and all matrices A{i,j} are typecasted into the // type of C. 
Any settings made to C by GxB_Matrix_Option_set (format by row // or by column, bitmap switch, hyper switch, and sparsity control) are // unchanged. GB_PUBLIC GrB_Info GxB_Matrix_concat // concatenate a 2D array of matrices ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix *Tiles, // 2D row-major array of size m-by-n const GrB_Index m, const GrB_Index n, const GrB_Descriptor desc // unused, except threading control ) ; // GxB_Matrix_split does the opposite of GxB_Matrix_concat. It splits a single // input matrix A into a 2D array of tiles. On input, the Tiles array must be // a non-NULL pointer to a previously allocated array of size at least m*n // where both m and n must be > 0. The Tiles_nrows array has size m, and // Tiles_ncols has size n. The (i,j)th tile has dimension // Tiles_nrows[i]-by-Tiles_ncols[j]. The sum of Tiles_nrows [0:m-1] must equal // the number of rows of A, and the sum of Tiles_ncols [0:n-1] must equal the // number of columns of A. The type of each tile is the same as the type of A; // no typecasting is done. GB_PUBLIC GrB_Info GxB_Matrix_split // split a matrix into 2D array of matrices ( GrB_Matrix *Tiles, // 2D row-major array of size m-by-n const GrB_Index m, const GrB_Index n, const GrB_Index *Tile_nrows, // array of size m const GrB_Index *Tile_ncols, // array of size n const GrB_Matrix A, // input matrix to split const GrB_Descriptor desc // unused, except threading control ) ; //------------------------------------------------------------------------------ // GxB_Matrix_diag, GxB_Vector_diag, GrB_Matrix_diag //------------------------------------------------------------------------------ // GrB_Matrix_diag constructs a new matrix from a vector. Let n be the length // of the v vector, from GrB_Vector_size (&n, v). If k = 0, then C is an // n-by-n diagonal matrix with the entries from v along the main diagonal of C, // with C(i,i) = v(i). If k is nonzero, C is square with dimension n+abs(k). 
// If k is positive, it denotes diagonals above the main diagonal, with // C(i,i+k) = v(i). If k is negative, it denotes diagonals below the main // diagonal of C, with C(i-k,i) = v(i). C is constructed with the same type // as v. GB_PUBLIC GrB_Info GrB_Matrix_diag // build a diagonal matrix from a vector ( GrB_Matrix *C, // output matrix const GrB_Vector v, // input vector int64_t k ) ; // GrB_Matrix_diag is like GxB_Matrix_diag (&C, v, k, NULL), except that C must // already exist on input, of the correct size. Any existing entries in C are // discarded. The type of C is preserved, so that if the type of C and v // differ, the entries are typecasted into the type of C. Any settings made to // C by GxB_Matrix_Option_set (format by row or by column, bitmap switch, hyper // switch, and sparsity control) are unchanged. GB_PUBLIC GrB_Info GxB_Matrix_diag // construct a diagonal matrix from a vector ( GrB_Matrix C, // output matrix const GrB_Vector v, // input vector int64_t k, const GrB_Descriptor desc // to specify # of threads ) ; // GxB_Vector_diag extracts a vector v from an input matrix A, which may be // rectangular. If k = 0, the main diagonal of A is extracted; k > 0 denotes // diagonals above the main diagonal of A, and k < 0 denotes diagonals below // the main diagonal of A. Let A have dimension m-by-n. If k is in the range // 0 to n-1, then v has length min(m,n-k). If k is negative and in the range // -1 to -m+1, then v has length min(m+k,n). If k is outside these ranges, // v has length 0 (this is not an error). // v must already exist on input, of the correct length; that is // GrB_Vector_size (&len,v) must return len = 0 if k >= n or k <= -m, len = // min(m,n-k) if k is in the range 0 to n-1, and len = min(m+k,n) if k is in // the range -1 to -m+1. Any existing entries in v are discarded. The type of // v is preserved, so that if the type of A and v differ, the entries are // typecasted into the type of v. 
Any settings made to v by // GxB_Vector_Option_set (bitmap switch and sparsity control) are unchanged. GB_PUBLIC GrB_Info GxB_Vector_diag // extract a diagonal from a matrix, as a vector ( GrB_Vector v, // output vector const GrB_Matrix A, // input matrix int64_t k, const GrB_Descriptor desc // unused, except threading control ) ; //============================================================================== // SuiteSparse:GraphBLAS options //============================================================================== // The following options modify how SuiteSparse:GraphBLAS stores and operates // on its matrices. The GxB_*Option* methods allow the user to suggest how the // internal representation of a matrix, or all matrices, should be held. These // options have no effect on the result (except for minor roundoff differences // for floating-point types). They only affect the time and memory usage of the // computations. // GxB_Matrix_Option_set: sets an option for a specific matrix // GxB_Matrix_Option_get: queries the current option of a specific matrix // GxB_Vector_Option_set: sets an option for a specific vector // GxB_Vector_Option_get: queries the current option of a specific vector // GxB_Global_Option_set: sets an option for all future matrices // GxB_Global_Option_get: queries current option for all future matrices #define GxB_HYPER 0 // (historical, use GxB_HYPER_SWITCH) typedef enum // for global options or matrix options { //------------------------------------------------------------ // for GxB_Matrix_Option_get/set and GxB_Global_Option_get/set: //------------------------------------------------------------ GxB_HYPER_SWITCH = 0, // defines switch to hypersparse (a double value) GxB_BITMAP_SWITCH = 34, // defines switch to bitmap (a double value) GxB_FORMAT = 1, // defines CSR/CSC format: GxB_BY_ROW or GxB_BY_COL //------------------------------------------------------------ // for GxB_Global_Option_get only: 
//------------------------------------------------------------ GxB_MODE = 2, // mode passed to GrB_init (blocking or non-blocking) GxB_LIBRARY_NAME = 8, // name of the library (char *) GxB_LIBRARY_VERSION = 9, // library version (3 int's) GxB_LIBRARY_DATE = 10, // date of the library (char *) GxB_LIBRARY_ABOUT = 11, // about the library (char *) GxB_LIBRARY_URL = 12, // URL for the library (char *) GxB_LIBRARY_LICENSE = 13, // license of the library (char *) GxB_LIBRARY_COMPILE_DATE = 14, // date library was compiled (char *) GxB_LIBRARY_COMPILE_TIME = 15, // time library was compiled (char *) GxB_API_VERSION = 16, // API version (3 int's) GxB_API_DATE = 17, // date of the API (char *) GxB_API_ABOUT = 18, // about the API (char *) GxB_API_URL = 19, // URL for the API (char *) GxB_COMPILER_VERSION = 23, // compiler version (3 int's) GxB_COMPILER_NAME = 24, // compiler name (char *) //------------------------------------------------------------ // for GxB_Global_Option_get/set only: //------------------------------------------------------------ GxB_GLOBAL_NTHREADS = GxB_NTHREADS, // max number of threads to use // If <= GxB_DEFAULT, then GraphBLAS selects the number // of threads automatically. GxB_GLOBAL_CHUNK = GxB_CHUNK, // chunk size for small problems. // If <= GxB_DEFAULT, then the default is used. 
GxB_BURBLE = 99, // diagnostic output (bool *) GxB_PRINTF = 101, // printf function diagnostic output GxB_FLUSH = 102, // flush function diagnostic output GxB_MEMORY_POOL = 103, // memory pool control GxB_PRINT_1BASED = 104, // print matrices as 0-based or 1-based //------------------------------------------------------------ // for GxB_Matrix_Option_get only: //------------------------------------------------------------ GxB_SPARSITY_STATUS = 33, // hyper, sparse, bitmap or full (1,2,4,8) GxB_IS_HYPER = 6, // historical; use GxB_SPARSITY_STATUS //------------------------------------------------------------ // for GxB_Matrix_Option_get/set only: //------------------------------------------------------------ GxB_SPARSITY_CONTROL = 32, // sparsity control: 0 to 15; see below //------------------------------------------------------------ // GPU and options (DRAFT: do not use) //------------------------------------------------------------ GxB_GLOBAL_GPU_CONTROL = GxB_GPU_CONTROL, GxB_GLOBAL_GPU_CHUNK = GxB_GPU_CHUNK, } GxB_Option_Field ; // GxB_FORMAT can be by row or by column: typedef enum { GxB_BY_ROW = 0, // CSR: compressed sparse row format GxB_BY_COL = 1, // CSC: compressed sparse column format GxB_NO_FORMAT = -1 // format not defined } GxB_Format_Value ; // The default format is by row. These constants are defined as GB_PUBLIC // const, so that if SuiteSparse:GraphBLAS is recompiled with a different // default format, and the application is relinked but not recompiled, it will // acquire the new default values. 
GB_PUBLIC const GxB_Format_Value GxB_FORMAT_DEFAULT ;

// the default hyper_switch parameter
GB_PUBLIC const double GxB_HYPER_DEFAULT ;

// GxB_SPARSITY_CONTROL can be any sum or bitwise OR of these 4 values:
#define GxB_HYPERSPARSE 1   // store matrix in hypersparse form
#define GxB_SPARSE      2   // store matrix as sparse form (compressed vector)
#define GxB_BITMAP      4   // store matrix as a bitmap
#define GxB_FULL        8   // store matrix as full; all entries must be present

// size of b array for GxB_set/get (GxB_BITMAP_SWITCH, b)
#define GxB_NBITMAP_SWITCH 8    // size of bitmap_switch parameter array

// any sparsity value:
#define GxB_ANY_SPARSITY (GxB_HYPERSPARSE + GxB_SPARSE + GxB_BITMAP + GxB_FULL)

// the default sparsity control is any format:
#define GxB_AUTO_SPARSITY GxB_ANY_SPARSITY

// GxB_Matrix_Option_set (A, GxB_SPARSITY_CONTROL, scontrol) provides hints
// about which data structure GraphBLAS should use for the matrix A:
//
//      GxB_AUTO_SPARSITY: GraphBLAS selects automatically.
//      GxB_HYPERSPARSE: always hypersparse, taking O(nvals(A)) space.
//      GxB_SPARSE: always in a sparse structure: compressed-sparse row/column,
//          taking O(nrows+nvals(A)) space if stored by row, or
//          O(ncols+nvals(A)) if stored by column.
//      GxB_BITMAP: always in a bitmap structure, taking O(nrows*ncols) space.
//      GxB_FULL: always in a full structure, taking O(nrows*ncols) space,
//          unless not all entries are present, in which case the bitmap
//          storage is used.
//
// These options can be summed.  For example, to allow a matrix to be sparse
// or hypersparse, but not bitmap or full, use GxB_SPARSE + GxB_HYPERSPARSE.
// Since GxB_FULL can only be used when all entries are present, matrices with
// just the GxB_FULL control setting are stored in bitmap form if any entries
// are not present.
//
// Only the least 4 bits of the sparsity control are considered, so the
// formats can be bitwise negated.  For example, to allow for any format
// except full, use ~GxB_FULL.
// // GxB_Matrix_Option_get (A, GxB_SPARSITY_STATUS, &sparsity) returns the // current data structure currently used for the matrix A (either hypersparse, // sparse, bitmap, or full). // // GxB_Matrix_Option_get (A, GxB_SPARSITY_CONTROL, &scontrol) returns the hint // for how A should be stored (hypersparse, sparse, bitmap, or full, or any // combination). // GxB_HYPER_SWITCH: // If the matrix or vector structure can be sparse or hypersparse, the // GxB_HYPER_SWITCH parameter controls when each of these structures are // used. The parameter is not used if the matrix or vector is full or // bitmap. // // Let k be the actual number of non-empty vectors (with at least one // entry). This value k is not dependent on whether or not the matrix is // stored in hypersparse structure. Let n be the number of vectors (the # // of columns if CSC, or rows if CSR). Let h be the value of the // GxB_HYPER_SWITCH setting of the matrix. // // If a matrix is currently hypersparse, it can be converted to // non-hypersparse if (n <= 1 || k > 2*n*h). Otherwise it stays // hypersparse. If (n <= 1) the matrix is always stored as // non-hypersparse. // // If currently non-hypersparse, it can be converted to hypersparse if (n // > 1 && k <= n*h). Otherwise, it stays non-hypersparse. If (n <= 1) // the matrix always remains non-hypersparse. // // Setting GxB_HYPER_SWITCH to GxB_ALWAYS_HYPER or GxB_NEVER_HYPER ensures // a matrix always stays hypersparse, or always stays non-hypersparse, // respectively. GB_PUBLIC const double GxB_ALWAYS_HYPER, GxB_NEVER_HYPER ; GB_PUBLIC GrB_Info GxB_Matrix_Option_set // set an option in a matrix ( GrB_Matrix A, // matrix to modify GxB_Option_Field field, // option to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Matrix_Option_get // gets the current option of a matrix ( GrB_Matrix A, // matrix to query GxB_Option_Field field, // option to query ... 
// return value of the matrix option ) ; GB_PUBLIC GrB_Info GxB_Vector_Option_set // set an option in a vector ( GrB_Vector A, // vector to modify GxB_Option_Field field, // option to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Vector_Option_get // gets the current option of a vector ( GrB_Vector A, // vector to query GxB_Option_Field field, // option to query ... // return value of the vector option ) ; // GxB_Global_Option_set controls the global defaults used when a new matrix is // created. GrB_init defines the following initial settings: // // GxB_Global_Option_set (GxB_HYPER_SWITCH, GxB_HYPER_DEFAULT) ; // GxB_Global_Option_set (GxB_BITMAP_SWITCH, NULL) ; // GxB_Global_Option_set (GxB_FORMAT, GxB_FORMAT_DEFAULT) ; // // The compile-time constants GxB_HYPER_DEFAULT and GxB_FORMAT_DEFAULT are // equal to 0.0625 and GxB_BY_ROW, by default. That is, by default, all new // matrices are held by row in CSR format. If a matrix has fewer than n/16 // columns, it can be converted to hypersparse structure. If it has more than // n/8 columns, it can be converted to a sparse structure. Modifying these // global settings via GxB_Global_Option_set has no effect on matrices already // created. GB_PUBLIC GrB_Info GxB_Global_Option_set // set a global default option ( GxB_Option_Field field, // option to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Global_Option_get // gets the current global default option ( GxB_Option_Field field, // option to query ... 
// return value of the global option
) ;

//------------------------------------------------------------------------------
// GxB_set and GxB_get
//------------------------------------------------------------------------------

// The simplest way to set/get a value of a GrB_Descriptor is with
// the generic GxB_set and GxB_get functions:

//      GxB_set (desc, field, value) ;
//      GxB_get (desc, field, &value) ;

// GxB_set and GxB_get are generic methods that set or query the options in
// a GrB_Matrix, a GrB_Descriptor, or in the global options.  They can be used
// with the following syntax.  Note that GxB_NTHREADS can be used for both the
// global nthreads_max, and for the # of threads in the descriptor.

// To set/get the global options:
//
//      GxB_set (GxB_HYPER_SWITCH, double h) ;
//      GxB_set (GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ;
//      GxB_set (GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ;
//      GxB_get (GxB_HYPER_SWITCH, double *h) ;
//
//      double b [GxB_NBITMAP_SWITCH] ;
//      GxB_set (GxB_BITMAP_SWITCH, b) ;
//      GxB_set (GxB_BITMAP_SWITCH, NULL) ;     // set defaults
//      GxB_get (GxB_BITMAP_SWITCH, b) ;
//
//      GxB_set (GxB_FORMAT, GxB_BY_ROW) ;
//      GxB_set (GxB_FORMAT, GxB_BY_COL) ;
//      GxB_get (GxB_FORMAT, GxB_Format_Value *s) ;
//
//      GxB_set (GxB_NTHREADS, nthreads_max) ;
//      GxB_get (GxB_NTHREADS, int *nthreads_max) ;
//
//      GxB_set (GxB_CHUNK, double chunk) ;
//      GxB_get (GxB_CHUNK, double *chunk) ;
//
//      GxB_set (GxB_BURBLE, bool burble) ;
//      GxB_get (GxB_BURBLE, bool *burble) ;
//
//      GxB_set (GxB_PRINTF, void *printf_function) ;
//      GxB_get (GxB_PRINTF, void **printf_function) ;
//
//      GxB_set (GxB_FLUSH, void *flush_function) ;
//      GxB_get (GxB_FLUSH, void **flush_function) ;
//
//      int64_t free_pool_limit [64] ;
//      GxB_set (GxB_MEMORY_POOL, free_pool_limit) ;
//      GxB_set (GxB_MEMORY_POOL, NULL) ;       // set defaults
//      GxB_get (GxB_MEMORY_POOL, free_pool_limit) ;

// To get global options that can be queried but not modified:
//
//      GxB_get (GxB_MODE, GrB_Mode *mode) ;

// To set/get a matrix
option: // // GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, double h) ; // GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ; // GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ; // GxB_get (GrB_Matrix A, GxB_HYPER_SWITCH, double *h) ; // // GxB_set (GrB_Matrix A, GxB_BITMAP_SWITCH, double b) ; // GxB_get (GrB_Matrix A, GxB_BITMAP_SWITCH, double *b) ; // // GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_ROW) ; // GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_COL) ; // GxB_get (GrB_Matrix A, GxB_FORMAT, GxB_Format_Value *s) ; // // GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ; // GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, scontrol) ; // GxB_get (GrB_Matrix A, GxB_SPARSITY_CONTROL, int *scontrol) ; // // GxB_get (GrB_Matrix A, GxB_SPARSITY_STATUS, int *sparsity) ; // To set/get a vector option or status: // // GxB_set (GrB_Vector v, GxB_BITMAP_SWITCH, double b) ; // GxB_get (GrB_Vector v, GxB_BITMAP_SWITCH, double *b) ; // // GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_ROW) ; // GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_COL) ; // GxB_get (GrB_Vector v, GxB_FORMAT, GxB_Format_Value *s) ; // // GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ; // GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, scontrol) ; // GxB_get (GrB_Vector v, GxB_SPARSITY_CONTROL, int *scontrol) ; // // GxB_get (GrB_Vector v, GxB_SPARSITY_STATUS, int *sparsity) ; // To set/get a descriptor field: // // GxB_set (GrB_Descriptor d, GrB_OUTP, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GrB_OUTP, GrB_REPLACE) ; // GxB_get (GrB_Descriptor d, GrB_OUTP, GrB_Desc_Value *v) ; // // GxB_set (GrB_Descriptor d, GrB_MASK, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP) ; // GxB_set (GrB_Descriptor d, GrB_MASK, GrB_STRUCTURE) ; // GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP + GrB_STRUCTURE) ; // GxB_get (GrB_Descriptor d, GrB_MASK, GrB_Desc_Value *v) ; // // GxB_set (GrB_Descriptor d, GrB_INP0, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GrB_INP0, 
GrB_TRAN) ;
//      GxB_get (GrB_Descriptor d, GrB_INP0, GrB_Desc_Value *v) ;
//
//      GxB_set (GrB_Descriptor d, GrB_INP1, GxB_DEFAULT) ;
//      GxB_set (GrB_Descriptor d, GrB_INP1, GrB_TRAN) ;
//      GxB_get (GrB_Descriptor d, GrB_INP1, GrB_Desc_Value *v) ;
//
//      GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_DEFAULT) ;
//      GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_GUSTAVSON) ;
//      GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_HASH) ;
//      GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_SAXPY) ;
//      GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_DOT) ;
//      GxB_get (GrB_Descriptor d, GxB_AxB_METHOD, GrB_Desc_Value *v) ;
//
//      GxB_set (GrB_Descriptor d, GxB_NTHREADS, nthreads) ;
//      GxB_get (GrB_Descriptor d, GxB_NTHREADS, int *nthreads) ;
//
//      GxB_set (GrB_Descriptor d, GxB_CHUNK, double chunk) ;
//      GxB_get (GrB_Descriptor d, GxB_CHUNK, double *chunk) ;
//
//      GxB_set (GrB_Descriptor d, GxB_SORT, int sort) ;
//      GxB_get (GrB_Descriptor d, GxB_SORT, int *sort) ;
//
//      GxB_set (GrB_Descriptor d, GxB_COMPRESSION, int method) ;
//      GxB_get (GrB_Descriptor d, GxB_COMPRESSION, int *method) ;
//
//      GxB_set (GrB_Descriptor d, GxB_IMPORT, int method) ;
//      GxB_get (GrB_Descriptor d, GxB_IMPORT, int *method) ;

#if GxB_STDC_VERSION >= 201112L
#define GxB_set(arg1,...)                                   \
    _Generic                                                \
    (                                                       \
        (arg1),                                             \
            int              : GxB_Global_Option_set ,      \
            GxB_Option_Field : GxB_Global_Option_set ,      \
            GrB_Vector       : GxB_Vector_Option_set ,      \
            GrB_Matrix       : GxB_Matrix_Option_set ,      \
            GrB_Descriptor   : GxB_Desc_set                 \
    )                                                       \
    (arg1, __VA_ARGS__)

#define GxB_get(arg1,...)
\ _Generic \ ( \ (arg1), \ const int : GxB_Global_Option_get , \ int : GxB_Global_Option_get , \ const GxB_Option_Field : GxB_Global_Option_get , \ GxB_Option_Field : GxB_Global_Option_get , \ const GrB_Vector : GxB_Vector_Option_get , \ GrB_Vector : GxB_Vector_Option_get , \ const GrB_Matrix : GxB_Matrix_Option_get , \ GrB_Matrix : GxB_Matrix_Option_get , \ const GrB_Descriptor : GxB_Desc_get , \ GrB_Descriptor : GxB_Desc_get \ ) \ (arg1, __VA_ARGS__) #endif //============================================================================== // GrB_free: free any GraphBLAS object //============================================================================== // for null and invalid objects #define GrB_NULL NULL #define GrB_INVALID_HANDLE NULL #if GxB_STDC_VERSION >= 201112L #define GrB_free(object) \ _Generic \ ( \ (object), \ GrB_Type *: GrB_Type_free , \ GrB_UnaryOp *: GrB_UnaryOp_free , \ GrB_BinaryOp *: GrB_BinaryOp_free , \ GxB_SelectOp *: GxB_SelectOp_free , \ GrB_IndexUnaryOp *: GrB_IndexUnaryOp_free , \ GrB_Monoid *: GrB_Monoid_free , \ GrB_Semiring *: GrB_Semiring_free , \ GrB_Scalar *: GrB_Scalar_free , \ GrB_Vector *: GrB_Vector_free , \ GrB_Matrix *: GrB_Matrix_free , \ GrB_Descriptor *: GrB_Descriptor_free , \ GxB_Iterator *: GxB_Iterator_free \ ) \ (object) #endif //============================================================================== // GrB_wait: finish computations //============================================================================== typedef enum { GrB_COMPLETE = 0, // establishes a happens-before relation GrB_MATERIALIZE = 1 // object is complete } GrB_WaitMode ; // Finish all pending work in a specific object. 
// Type-specific wait methods: each finishes pending work on one object.
// waitmode is GrB_COMPLETE or GrB_MATERIALIZE (see the GrB_WaitMode enum
// declared above).
GB_PUBLIC GrB_Info GrB_Type_wait         (GrB_Type type      , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_UnaryOp_wait      (GrB_UnaryOp op     , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_BinaryOp_wait     (GrB_BinaryOp op    , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GxB_SelectOp_wait     (GxB_SelectOp op    , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_IndexUnaryOp_wait (GrB_IndexUnaryOp op, GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Monoid_wait       (GrB_Monoid monoid  , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Semiring_wait     (GrB_Semiring semiring, GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Descriptor_wait   (GrB_Descriptor desc, GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Scalar_wait       (GrB_Scalar s       , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Vector_wait       (GrB_Vector v       , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Matrix_wait       (GrB_Matrix A       , GrB_WaitMode waitmode) ;

// GrB_wait (object,waitmode) polymorphic function:
// _Generic selects, by the static type of 'object', the matching
// type-specific *_wait method declared above.
#if GxB_STDC_VERSION >= 201112L
#define GrB_wait(object,waitmode)                       \
    _Generic                                            \
    (                                                   \
        (object),                                       \
            GrB_Type         : GrB_Type_wait         ,  \
            GrB_UnaryOp      : GrB_UnaryOp_wait      ,  \
            GrB_BinaryOp     : GrB_BinaryOp_wait     ,  \
            GxB_SelectOp     : GxB_SelectOp_wait     ,  \
            GrB_IndexUnaryOp : GrB_IndexUnaryOp_wait ,  \
            GrB_Monoid       : GrB_Monoid_wait       ,  \
            GrB_Semiring     : GrB_Semiring_wait     ,  \
            GrB_Scalar       : GrB_Scalar_wait       ,  \
            GrB_Vector       : GrB_Vector_wait       ,  \
            GrB_Matrix       : GrB_Matrix_wait       ,  \
            GrB_Descriptor   : GrB_Descriptor_wait      \
    )                                                   \
    (object, waitmode)
#endif

// NOTE: GxB_Scalar_wait is historical; use GrB_Scalar_wait instead
// (note the different signature: it takes GrB_Scalar * and no waitmode,
// unlike GrB_Scalar_wait above)
GB_PUBLIC GrB_Info GxB_Scalar_wait (GrB_Scalar *s) ;

//==============================================================================
// GrB_error: error handling
//==============================================================================

// Each GraphBLAS method and operation returns a GrB_Info error code.
// GrB_error returns additional information on the error in a thread-safe
// null-terminated string.
The string returned by GrB_error is owned by // the GraphBLAS library and must not be free'd. GB_PUBLIC GrB_Info GrB_Type_error (const char **error, const GrB_Type type) ; GB_PUBLIC GrB_Info GrB_UnaryOp_error (const char **error, const GrB_UnaryOp op) ; GB_PUBLIC GrB_Info GrB_BinaryOp_error (const char **error, const GrB_BinaryOp op) ; GB_PUBLIC GrB_Info GxB_SelectOp_error (const char **error, const GxB_SelectOp op) ; GB_PUBLIC GrB_Info GrB_IndexUnaryOp_error (const char **error, const GrB_IndexUnaryOp op) ; GB_PUBLIC GrB_Info GrB_Monoid_error (const char **error, const GrB_Monoid monoid) ; GB_PUBLIC GrB_Info GrB_Semiring_error (const char **error, const GrB_Semiring semiring) ; GB_PUBLIC GrB_Info GrB_Scalar_error (const char **error, const GrB_Scalar s) ; GB_PUBLIC GrB_Info GrB_Vector_error (const char **error, const GrB_Vector v) ; GB_PUBLIC GrB_Info GrB_Matrix_error (const char **error, const GrB_Matrix A) ; GB_PUBLIC GrB_Info GrB_Descriptor_error (const char **error, const GrB_Descriptor d) ; // GxB_Scalar_error is historical: use GrB_Scalar_error instead GB_PUBLIC GrB_Info GxB_Scalar_error (const char **error, const GrB_Scalar s) ; // GrB_error (error,object) polymorphic function: #if GxB_STDC_VERSION >= 201112L #define GrB_error(error,object) \ _Generic \ ( \ (object), \ const GrB_Type : GrB_Type_error , \ GrB_Type : GrB_Type_error , \ const GrB_UnaryOp : GrB_UnaryOp_error , \ GrB_UnaryOp : GrB_UnaryOp_error , \ const GrB_BinaryOp : GrB_BinaryOp_error , \ GrB_BinaryOp : GrB_BinaryOp_error , \ const GxB_SelectOp : GxB_SelectOp_error , \ GxB_SelectOp : GxB_SelectOp_error , \ const GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \ GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \ const GrB_Monoid : GrB_Monoid_error , \ GrB_Monoid : GrB_Monoid_error , \ const GrB_Semiring : GrB_Semiring_error , \ GrB_Semiring : GrB_Semiring_error , \ const GrB_Scalar : GrB_Scalar_error , \ GrB_Scalar : GrB_Scalar_error , \ const GrB_Vector : GrB_Vector_error , \ GrB_Vector : 
GrB_Vector_error , \ const GrB_Matrix : GrB_Matrix_error , \ GrB_Matrix : GrB_Matrix_error , \ const GrB_Descriptor : GrB_Descriptor_error , \ GrB_Descriptor : GrB_Descriptor_error \ ) \ (error, object) #endif //============================================================================== // GrB_mxm, vxm, mxv: matrix multiplication over a semiring //============================================================================== GB_PUBLIC GrB_Info GrB_mxm // C<Mask> = accum (C, A*B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Semiring semiring, // defines '+' and '*' for A*B const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; GB_PUBLIC GrB_Info GrB_vxm // w'<Mask> = accum (w, u'*A) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Semiring semiring, // defines '+' and '*' for u'*A const GrB_Vector u, // first input: vector u const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for w, mask, and A ) ; GB_PUBLIC GrB_Info GrB_mxv // w<Mask> = accum (w, A*u) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Semiring semiring, // defines '+' and '*' for A*B const GrB_Matrix A, // first input: matrix A const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w, mask, and A ) ; //============================================================================== // GrB_eWiseMult: element-wise matrix and vector operations, set intersection 
//==============================================================================

// GrB_eWiseMult computes C<Mask> = accum (C, A.*B), where ".*" is the Hadamard
// product, and where pairs of elements in two matrices (or vectors) are
// pairwise "multiplied" with C(i,j) = mult (A(i,j),B(i,j)).

GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Semiring       // w<Mask> = accum (w, u.*v)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_Semiring semiring,    // defines '.*' for t=u.*v
    const GrB_Vector u,             // first input:  vector u
    const GrB_Vector v,             // second input: vector v
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Monoid         // w<Mask> = accum (w, u.*v)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_Monoid monoid,        // defines '.*' for t=u.*v
    const GrB_Vector u,             // first input:  vector u
    const GrB_Vector v,             // second input: vector v
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_BinaryOp       // w<Mask> = accum (w, u.*v)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp mult,        // defines '.*' for t=u.*v
    const GrB_Vector u,             // first input:  vector u
    const GrB_Vector v,             // second input: vector v
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Semiring       // C<Mask> = accum (C, A.*B)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Semiring semiring,    // defines '.*' for T=A.*B
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, Mask, A, and B
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Monoid         // C<Mask> = accum (C, A.*B)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Monoid monoid,        // defines '.*' for T=A.*B
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, Mask, A, and B
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_BinaryOp       // C<Mask> = accum (C, A.*B)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_BinaryOp mult,        // defines '.*' for T=A.*B
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, Mask, A, and B
) ;

// All 6 of the above type-specific functions are captured in a single
// type-generic function, GrB_eWiseMult: the outer _Generic selects on the
// output (matrix vs vector), the inner one on the operator kind.

#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseMult(C,Mask,accum,op,A,B,desc)                         \
    _Generic                                                            \
    (                                                                   \
        (C),                                                            \
        GrB_Matrix :                                                    \
            _Generic                                                    \
            (                                                           \
                (op),                                                   \
                const GrB_Semiring : GrB_Matrix_eWiseMult_Semiring ,    \
                      GrB_Semiring : GrB_Matrix_eWiseMult_Semiring ,    \
                const GrB_Monoid   : GrB_Matrix_eWiseMult_Monoid   ,    \
                      GrB_Monoid   : GrB_Matrix_eWiseMult_Monoid   ,    \
                const GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp ,    \
                      GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp      \
            ),                                                          \
        GrB_Vector :                                                    \
            _Generic                                                    \
            (                                                           \
                (op),                                                   \
                const GrB_Semiring : GrB_Vector_eWiseMult_Semiring ,    \
                      GrB_Semiring : GrB_Vector_eWiseMult_Semiring ,    \
                const GrB_Monoid   : GrB_Vector_eWiseMult_Monoid   ,    \
                      GrB_Monoid   : GrB_Vector_eWiseMult_Monoid   ,    \
                const GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp ,    \
                      GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp      \
            )                                                           \
    )                                                                   \
    (C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GrB_eWiseAdd: element-wise matrix and vector operations, set union
//==============================================================================

// GrB_eWiseAdd computes C<Mask> = accum (C, A+B), where pairs of elements in
// two matrices (or two vectors) are pairwise "added".

GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Semiring        // w<mask> = accum (w, u+v)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_Semiring semiring,    // defines '+' for t=u+v
    const GrB_Vector u,             // first input:  vector u
    const GrB_Vector v,             // second input: vector v
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Monoid          // w<mask> = accum (w, u+v)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_Monoid monoid,        // defines '+' for t=u+v
    const GrB_Vector u,             // first input:  vector u
    const GrB_Vector v,             // second input: vector v
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_BinaryOp        // w<mask> = accum (w, u+v)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp add,         // defines '+' for t=u+v
    const GrB_Vector u,             // first input:  vector u
    const GrB_Vector v,             // second input: vector v
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Semiring        // C<Mask> = accum (C, A+B)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Semiring semiring,    // defines '+' for T=A+B
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, Mask, A, and B
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Monoid          // C<Mask> = accum (C, A+B)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Monoid monoid,        // defines '+' for T=A+B
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, Mask, A, and B
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_BinaryOp        // C<Mask> = accum (C, A+B)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_BinaryOp add,         // defines '+' for T=A+B
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc      // descriptor for C, Mask, A, and B
) ;

// Type-generic GrB_eWiseAdd: outer _Generic selects on the output
// (matrix vs vector), the inner one on the operator kind.

#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseAdd(C,Mask,accum,op,A,B,desc)                          \
    _Generic                                                            \
    (                                                                   \
        (C),                                                            \
        GrB_Matrix :                                                    \
            _Generic                                                    \
            (                                                           \
                (op),                                                   \
                const GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring ,     \
                      GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring ,     \
                const GrB_Monoid   : GrB_Matrix_eWiseAdd_Monoid   ,     \
                      GrB_Monoid   : GrB_Matrix_eWiseAdd_Monoid   ,     \
                const GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp ,     \
                      GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp       \
            ),                                                          \
        GrB_Vector :                                                    \
            _Generic                                                    \
            (                                                           \
                (op),                                                   \
                const GrB_Semiring : GrB_Vector_eWiseAdd_Semiring ,     \
                      GrB_Semiring : GrB_Vector_eWiseAdd_Semiring ,     \
                const GrB_Monoid   : GrB_Vector_eWiseAdd_Monoid   ,     \
                      GrB_Monoid   : GrB_Vector_eWiseAdd_Monoid   ,     \
                const GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp ,     \
                      GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp       \
            )                                                           \
    )                                                                   \
    (C, Mask, accum, op, A, B, desc)
#endif

//==============================================================================
// GxB_eWiseUnion: a variant of GrB_eWiseAdd
//==============================================================================

// GxB_eWiseUnion is a variant of eWiseAdd.  The methods create a result with
// the same sparsity structure.  They differ when an entry is present in A but
// not B, or in B but not A.

// eWiseAdd does the following, for a matrix, where "+" is the add binary op:

//      if A(i,j) and B(i,j) are both present:
//          C(i,j) = A(i,j) + B(i,j)
//      else if A(i,j) is present but not B(i,j)
//          C(i,j) = A(i,j)
//      else if B(i,j) is present but not A(i,j)
//          C(i,j) = B(i,j)

// by contrast, eWiseUnion always applies the operator:

//      if A(i,j) and B(i,j) are both present:
//          C(i,j) = A(i,j) + B(i,j)
//      else if A(i,j) is present but not B(i,j)
//          C(i,j) = A(i,j) + beta
//      else if B(i,j) is present but not A(i,j)
//          C(i,j) = alpha + B(i,j)

GB_PUBLIC
GrB_Info GxB_Vector_eWiseUnion      // w<mask> = accum (w, u+v)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp add,         // defines '+' for t=u+v
    const GrB_Vector u,             // first input:  vector u
    const GrB_Scalar alpha,         // substitute for u(i) when only v(i) exists
    const GrB_Vector v,             // second input: vector v
    const GrB_Scalar beta,          // substitute for v(i) when only u(i) exists
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_eWiseUnion      // C<M> = accum (C, A+B)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_BinaryOp add,         // defines '+' for T=A+B
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Scalar alpha,         // substitute for A(i,j) when only B(i,j) exists
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Scalar beta,          // substitute for B(i,j) when only A(i,j) exists
    const GrB_Descriptor desc       // descriptor for C, M, A, and B
) ;

// Type-generic GxB_eWiseUnion: selects the matrix or vector variant.
#if GxB_STDC_VERSION >= 201112L
#define GxB_eWiseUnion(C,Mask,accum,op,A,alpha,B,beta,desc)             \
    _Generic                                                            \
    (                                                                   \
        (C),                                                            \
        const GrB_Matrix : GxB_Matrix_eWiseUnion ,                      \
              GrB_Matrix : GxB_Matrix_eWiseUnion ,                      \
        const GrB_Vector : GxB_Vector_eWiseUnion ,                      \
              GrB_Vector : GxB_Vector_eWiseUnion                        \
    )                                                                   \
    (C, Mask, accum, op, A, alpha, B, beta, desc)
#endif

//==============================================================================
// GrB_extract: extract a submatrix or subvector
//==============================================================================

// Extract entries from a matrix or vector; T = A(I,J).  This (like most
// GraphBLAS methods) is then followed by C<Mask>=accum(C,T).

// To extract all rows of a matrix or vector, as in A (:,J), use I=GrB_ALL as
// the input argument.  For all columns of a matrix, use J=GrB_ALL.

GB_PUBLIC const uint64_t *GrB_ALL ;

// To extract a range of rows and columns, I and J can be a list of 2 or 3
// indices that defines a range (begin:end) or a strided range (begin:inc:end).
// To specify the colon syntax I = begin:end, the array I has size at least 2,
// where I [GxB_BEGIN] = begin and I [GxB_END] = end.  The parameter ni is then
// passed as the special value GxB_RANGE.  To specify the colon syntax I =
// begin:inc:end, the array I has size at least three, with the values begin,
// end, and inc (in that order), and then pass in the value ni = GxB_STRIDE.
// The same can be done for the list J and its size, nj.

// These special values of ni and nj can be used for GrB_assign,
// GrB_extract, and GxB_subassign.
#define GxB_RANGE       (INT64_MAX)
#define GxB_STRIDE      (INT64_MAX-1)
#define GxB_BACKWARDS   (INT64_MAX-2)

// for the strided range begin:inc:end, I [GxB_BEGIN] is the value of begin, I
// [GxB_END] is the value end, I [GxB_INC] is the magnitude of the stride.  If
// the stride is negative, use ni = GxB_BACKWARDS.
#define GxB_BEGIN (0)
#define GxB_END   (1)
#define GxB_INC   (2)

// For example, the notation 10:-2:1 defines a sequence [10 8 6 4 2].
// The end point of the sequence (1) need not appear in the sequence, if
// the last increment goes past it.
// To specify the same in GraphBLAS, use:

//      GrB_Index I [3], ni = GxB_BACKWARDS ;
//      I [GxB_BEGIN ] = 10 ;               // the start of the sequence
//      I [GxB_INC   ] = 2 ;                // the magnitude of the increment
//      I [GxB_END   ] = 1 ;                // the end of the sequence

GB_PUBLIC
GrB_Info GrB_Vector_extract         // w<mask> = accum (w, u(I))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_Vector u,             // first input:  vector u
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extract         // C<Mask> = accum (C, A(I,J))
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C, Mask, and A
) ;

GB_PUBLIC
GrB_Info GrB_Col_extract            // w<mask> = accum (w, A(I,j))
(
    GrB_Vector w,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    GrB_Index j,                    // column index
    const GrB_Descriptor desc       // descriptor for w, mask, and A
) ;

//------------------------------------------------------------------------------
// GrB_extract: generic matrix/vector extraction
//------------------------------------------------------------------------------

// GrB_extract is a generic interface to the following functions:

// GrB_Vector_extract (w,mask,acc,u,I,ni,d)      // w<m>    = acc (w, u(I))
// GrB_Col_extract    (w,mask,acc,A,I,ni,j,d)    // w<m>    = acc (w, A(I,j))
// GrB_Matrix_extract (C,Mask,acc,A,I,ni,J,nj,d) // C<Mask> = acc (C, A(I,J))

#if GxB_STDC_VERSION >= 201112L
#define GrB_extract(arg1,Mask,accum,arg4,...) \
    _Generic                                                    \
    (                                                           \
        (arg1),                                                 \
        GrB_Vector :                                            \
            _Generic                                            \
            (                                                   \
                (arg4),                                         \
                const GrB_Vector : GrB_Vector_extract ,         \
                      GrB_Vector : GrB_Vector_extract ,         \
                const GrB_Matrix : GrB_Col_extract    ,         \
                      GrB_Matrix : GrB_Col_extract              \
            ),                                                  \
        GrB_Matrix : GrB_Matrix_extract                         \
    )                                                           \
    (arg1, Mask, accum, arg4, __VA_ARGS__)
#endif

//==============================================================================
// GxB_subassign: matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
//==============================================================================

// Assign entries in a matrix or vector; C(I,J) = A.

// Each GxB_subassign function is very similar to its corresponding GrB_assign
// function in the spec, but they differ in two ways: (1) the mask in
// GxB_subassign has the same size as w(I) for vectors and C(I,J) for matrices,
// and (2) they differ in the GrB_REPLACE option.  See the user guide for
// details.

// In GraphBLAS notation, the two methods can be described as follows:

// matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
// matrix and vector    assign: C<Mask>(I,J) = accum (C(I,J), A)

// --- assign ------------------------------------------------------------------
//
// GrB_Matrix_assign      C<M>(I,J) += A        M same size as matrix C.
//                                              A is |I|-by-|J|
//
// GrB_Vector_assign      w<m>(I)   += u        m same size as column vector w.
//                                              u is |I|-by-1
//
// GrB_Row_assign         C<m'>(i,J) += u'      m is a column vector the same
//                                              size as a row of C.
//                                              u is |J|-by-1, i is a scalar.
//
// GrB_Col_assign         C<m>(I,j) += u        m is a column vector the same
//                                              size as a column of C.
//                                              u is |I|-by-1, j is a scalar.
//
// --- subassign ---------------------------------------------------------------
//
// GxB_Matrix_subassign   C(I,J)<M> += A        M same size as matrix A.
//                                              A is |I|-by-|J|
//
// GxB_Vector_subassign   w(I)<m>   += u        m same size as column vector u.
//                                              u is |I|-by-1
//
// GxB_Row_subassign      C(i,J)<m'> += u'      m same size as column vector u.
//                                              u is |J|-by-1, i is a scalar.
//
// GxB_Col_subassign      C(I,j)<m> += u        m same size as column vector u.
//                                              u is |I|-by-1, j is a scalar.

GB_PUBLIC
GrB_Info GxB_Vector_subassign       // w(I)<mask> = accum (w(I),u)
(
    GrB_Vector w,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w(I),t)
    const GrB_Vector u,             // first input:  vector u
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign       // C(I,J)<Mask> = accum (C(I,J),A)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),T)
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J), Mask, and A
) ;

GB_PUBLIC
GrB_Info GxB_Col_subassign          // C(I,j)<mask> = accum (C(I,j),u)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for C(I,j), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C(I,j),t)
    const GrB_Vector u,             // input vector
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    GrB_Index j,                    // column index
    const GrB_Descriptor desc       // descriptor for C(I,j) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Row_subassign          // C(i,J)<mask'> = accum (C(i,J),u')
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for C(i,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C(i,J),t)
    const GrB_Vector u,             // input vector
    GrB_Index i,                    // row index
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(i,J) and mask
) ;

//------------------------------------------------------------------------------
// GxB_Vector_subassign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------

// Assigns a single scalar to a subvector, w(I)<mask> = accum(w(I),x).  The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w(I)<mask> = accum(w(I),u) is done.

GB_PUBLIC
GrB_Info GxB_Vector_subassign_BOOL  // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w(I),x)
    bool x,                         // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT8  // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int8_t x,                       // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT8 // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint8_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT16 // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int16_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT16 // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint16_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT32 // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int32_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT32 // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint32_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT64 // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int64_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT64 // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint64_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP32  // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    float x,                        // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP64  // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    double x,                       // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC32  // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GxB_FC32_t x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC64  // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GxB_FC64_t x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_UDT   // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    void *x,                        // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_subassign_Scalar // w(I)<mask> = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w(I), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GrB_Scalar x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w(I) and mask
) ;

//------------------------------------------------------------------------------
// GxB_Matrix_subassign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------

// Assigns a single scalar to a submatrix, C(I,J)<Mask> = accum(C(I,J),x).  The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C(I,J)<Mask> = accum(C(I,J),A) is done.
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_BOOL  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    bool x,                         // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT8  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int8_t x,                       // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint8_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int16_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint16_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int32_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint32_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int64_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint64_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP32  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    float x,                        // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP64  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    double x,                       // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC32  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional
accum for Z=accum(C(I,J),x) GxB_FC32_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C(I,J) and Mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_subassign_FC64 // C(I,J)<Mask> = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GxB_FC64_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C(I,J) and Mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_subassign_UDT // C(I,J)<Mask> = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) void *x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C(I,J) and Mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_subassign_Scalar // C(I,J)<Mask> = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GrB_Scalar x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C(I,J) and Mask ) ; //------------------------------------------------------------------------------ // GxB_subassign: generic submatrix/subvector 
// assignment
//------------------------------------------------------------------------------

// GxB_subassign is a generic function that provides access to all specific
// GxB_*_subassign* functions:

// GxB_Vector_subassign   (w,m,acc,u,I,ni,d)      // w(I)<m>   = acc(w(I),u)
// GxB_Matrix_subassign   (C,M,acc,A,I,ni,J,nj,d) // C(I,J)<M> = acc(C(I,J),A)
// GxB_Col_subassign      (C,m,acc,u,I,ni,j,d)    // C(I,j)<m> = acc(C(I,j),u)
// GxB_Row_subassign      (C,m,acc,u,i,J,nj,d)    // C(i,J)<m'> = acc(C(i,J),u')
// GxB_Vector_subassign_T (w,m,acc,x,I,ni,d)      // w(I)<m>   = acc(w(I),x)
// GxB_Matrix_subassign_T (C,M,acc,x,I,ni,J,nj,d) // C(I,J)<M> = acc(C(I,J),x)

// The C11 _Generic expression below dispatches at compile time:
// first on the type of arg1 (GrB_Vector output vs. anything else, i.e. a
// GrB_Matrix), then on arg4 (a built-in scalar type via GB_CASES, a
// GrB_Scalar, or a GrB_Vector), and finally — for the matrix/vector case —
// on arg5 (an index list selects GxB_Col_subassign, otherwise
// GxB_Row_subassign).  Requires a C11 compiler (guarded by
// GxB_STDC_VERSION >= 201112L).

#if GxB_STDC_VERSION >= 201112L
#define GxB_subassign(arg1,Mask,accum,arg4,arg5,...)                \
    _Generic                                                        \
    (                                                               \
        (arg1),                                                     \
            GrB_Vector :                                            \
                _Generic                                            \
                (                                                   \
                    (arg4),                                         \
                        GB_CASES (, GxB, Vector_subassign) ,        \
                        const GrB_Scalar : GxB_Vector_subassign_Scalar, \
                        GrB_Scalar : GxB_Vector_subassign_Scalar,   \
                        default: GxB_Vector_subassign               \
                ),                                                  \
        default:                                                    \
            _Generic                                                \
            (                                                       \
                (arg4),                                             \
                    GB_CASES (, GxB, Matrix_subassign) ,            \
                    const GrB_Scalar : GxB_Matrix_subassign_Scalar, \
                    GrB_Scalar : GxB_Matrix_subassign_Scalar,       \
                    const GrB_Vector :                              \
                        _Generic                                    \
                        (                                           \
                            (arg5),                                 \
                                const GrB_Index *: GxB_Col_subassign , \
                                GrB_Index *: GxB_Col_subassign ,    \
                                default: GxB_Row_subassign          \
                        ),                                          \
                    GrB_Vector :                                    \
                        _Generic                                    \
                        (                                           \
                            (arg5),                                 \
                                const GrB_Index *: GxB_Col_subassign , \
                                GrB_Index *: GxB_Col_subassign ,    \
                                default: GxB_Row_subassign          \
                        ),                                          \
                    default: GxB_Matrix_subassign                   \
            )                                                       \
    )                                                               \
    (arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif

//==============================================================================
// GrB_assign: matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
//==============================================================================

// Assign entries in a matrix or vector; C(I,J) = A.
// Each of these can be used with their generic name, GrB_assign.
GB_PUBLIC GrB_Info GrB_Vector_assign // w<mask>(I) = accum (w(I),u) ( GrB_Vector w, // input/output matrix for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w(I),t) const GrB_Vector u, // first input: vector u const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign // C<Mask>(I,J) = accum (C(I,J),A) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),T) const GrB_Matrix A, // first input: matrix A const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C, Mask, and A ) ; GB_PUBLIC GrB_Info GrB_Col_assign // C<mask>(I,j) = accum (C(I,j),u) ( GrB_Matrix C, // input/output matrix for results const GrB_Vector mask, // optional mask for C(:,j), unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(C(I,j),t) const GrB_Vector u, // input vector const GrB_Index *I, // row indices GrB_Index ni, // number of row indices GrB_Index j, // column index const GrB_Descriptor desc // descriptor for C(:,j) and mask ) ; GB_PUBLIC GrB_Info GrB_Row_assign // C<mask'>(i,J) = accum (C(i,J),u') ( GrB_Matrix C, // input/output matrix for results const GrB_Vector mask, // optional mask for C(i,:), unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(C(i,J),t) const GrB_Vector u, // input vector GrB_Index i, // row index const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C(i,:) and mask ) ; //------------------------------------------------------------------------------ // GrB_Vector_assign_[SCALAR]: scalar expansion assignment to 
subvector //------------------------------------------------------------------------------ // Assigns a single scalar to a subvector, w<mask>(I) = accum(w(I),x). The // scalar x is implicitly expanded into a vector u of size ni-by-1, with each // entry in u equal to x, and then w<mask>(I) = accum(w(I),u) is done. GB_PUBLIC GrB_Info GrB_Vector_assign_BOOL // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w(I),x) bool x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_INT8 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int8_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_UINT8 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint8_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_INT16 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int16_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) 
; GB_PUBLIC GrB_Info GrB_Vector_assign_UINT16 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint16_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_INT32 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int32_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_UINT32 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint32_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_INT64 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int64_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_UINT64 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint64_t x, // scalar to assign to w(I) const 
GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_FP32 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) float x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_FP64 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) double x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_assign_FC32 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) GxB_FC32_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_assign_FC64 // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) GxB_FC64_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_UDT // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if 
NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) void *x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_assign_Scalar // w<mask>(I) = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) GrB_Scalar x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------------------------------------------ // GrB_Matrix_assign_[SCALAR]: scalar expansion assignment to submatrix //------------------------------------------------------------------------------ // Assigns a single scalar to a submatrix, C<Mask>(I,J) = accum(C(I,J),x). The // scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each // entry in A equal to x, and then C<Mask>(I,J) = accum(C(I,J),A) is done. 
GB_PUBLIC GrB_Info GrB_Matrix_assign_BOOL // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) bool x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT8 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int8_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT8 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint8_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT16 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int16_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor 
desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT16 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint16_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int32_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint32_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int64_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, 
// number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint64_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_FP32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) float x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_FP64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) double x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_assign_FC32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GxB_FC32_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const 
GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_assign_FC64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GxB_FC64_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UDT // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) void *x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_Scalar // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GrB_Scalar x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; //------------------------------------------------------------------------------ // GrB_assign: generic submatrix/subvector assignment //------------------------------------------------------------------------------ // GrB_assign is a generic function that provides access to all specific // GrB_*_assign* functions: // 
// GrB_Vector_assign_T (w,m,acc,x,I,ni,d)      // w<m>(I)   = acc(w(I),x)
// GrB_Vector_assign   (w,m,acc,u,I,ni,d)      // w<m>(I)   = acc(w(I),u)
// GrB_Matrix_assign_T (C,M,acc,x,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),x)
// GrB_Col_assign      (C,m,acc,u,I,ni,j,d)    // C<m>(I,j) = acc(C(I,j),u)
// GrB_Row_assign      (C,m,acc,u,i,J,nj,d)    // C<m'>(i,J) = acc(C(i,J),u')
// GrB_Matrix_assign   (C,M,acc,A,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),A)

// The C11 _Generic expression below mirrors GxB_subassign's dispatch:
// first on arg1 (GrB_Vector output vs. anything else, i.e. a GrB_Matrix),
// then on arg4 (built-in scalar via GB_CASES, GrB_Scalar, or GrB_Vector),
// and for the matrix/vector case on arg5 (an index list selects
// GrB_Col_assign, otherwise GrB_Row_assign).  Requires a C11 compiler
// (guarded by GxB_STDC_VERSION >= 201112L).

#if GxB_STDC_VERSION >= 201112L
#define GrB_assign(arg1,Mask,accum,arg4,arg5,...)                   \
    _Generic                                                        \
    (                                                               \
        (arg1),                                                     \
            GrB_Vector :                                            \
                _Generic                                            \
                (                                                   \
                    (arg4),                                         \
                        GB_CASES (, GrB, Vector_assign) ,           \
                        const GrB_Scalar : GrB_Vector_assign_Scalar , \
                        GrB_Scalar : GrB_Vector_assign_Scalar ,     \
                        default: GrB_Vector_assign                  \
                ),                                                  \
        default:                                                    \
            _Generic                                                \
            (                                                       \
                (arg4),                                             \
                    GB_CASES (, GrB, Matrix_assign) ,               \
                    const GrB_Scalar : GrB_Matrix_assign_Scalar ,   \
                    GrB_Scalar : GrB_Matrix_assign_Scalar ,         \
                    const GrB_Vector :                              \
                        _Generic                                    \
                        (                                           \
                            (arg5),                                 \
                                const GrB_Index *: GrB_Col_assign , \
                                GrB_Index *: GrB_Col_assign ,       \
                                default: GrB_Row_assign             \
                        ),                                          \
                    GrB_Vector :                                    \
                        _Generic                                    \
                        (                                           \
                            (arg5),                                 \
                                const GrB_Index *: GrB_Col_assign , \
                                GrB_Index *: GrB_Col_assign ,       \
                                default: GrB_Row_assign             \
                        ),                                          \
                    default: GrB_Matrix_assign                      \
            )                                                       \
    )                                                               \
    (arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif

//==============================================================================
// GrB_apply: matrix and vector apply
//==============================================================================

// Apply a unary, index_unary, or binary operator to entries in a matrix or
// vector, C<M> = accum (C, op (A)).
GB_PUBLIC GrB_Info GrB_Vector_apply // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_UnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply // C<Mask> = accum (C, op(A)) or op(A') ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_UnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------- // vector apply: binaryop variants (bind 1st) //------------------------------------------- // Apply a binary operator to the entries in a vector, binding the first // input to a scalar x, w<mask> = accum (w, op (x,u)). 
GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_Scalar // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; // historical: identical to GxB_Vector_apply_BinaryOp1st GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp1st // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_BOOL // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries bool x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT8 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int8_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT16 // w<mask> = accum (w, op(x,u)) ( 
GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int16_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int32_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int64_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT8 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint8_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT16 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for 
z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint16_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint32_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint64_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_FP32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries float x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_FP64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries double x, // first input: scalar x const GrB_Vector u, // second input: vector u const 
GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp1st_FC32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC32_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp1st_FC64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC64_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UDT // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const void *x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // vector apply: binaryop variants (bind 2nd) //------------------------------------------- // Apply a binary operator to the entries in a vector, binding the second // input to a scalar y, w<mask> = accum (w, op (u,y)). 
GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_Scalar // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; // historical: identical to GrB_Vector_apply_BinaryOp2nd_Scalar GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp2nd // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_BOOL // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT8 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT16 // w<mask> = accum (w, op(u,y)) ( 
GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT8 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT16 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for 
z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_FP32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u float y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_FP64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u double y, // second input: scalar y const 
GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp2nd_FC32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp2nd_FC64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UDT // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // vector apply: IndexUnaryOp variants //------------------------------------------- // Apply a GrB_IndexUnaryOp to the entries in a vector GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_Scalar // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const 
GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_BOOL // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT64 // w<mask> = accum (w, 
op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional 
accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_FP32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u float y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_FP64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u double y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_IndexOp_FC32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_IndexOp_FC64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC64_t y, // second input: scalar y 
const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UDT // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // matrix apply: binaryop variants (bind 1st) //------------------------------------------- // Apply a binary operator to the entries in a matrix, binding the first input // to a scalar x, C<Mask> = accum (C, op (x,A)), or op(x,A'). GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_Scalar // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; // historical: identical to GrB_Matrix_apply_BinaryOp1st_Scalar GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp1st // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_BOOL // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp 
accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries bool x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT8 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int8_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT16 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int16_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int32_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int64_t x, // first input: scalar x const GrB_Matrix A, // second input: 
matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT8 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint8_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT16 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint16_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint32_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint64_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_FP32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, 
// input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries float x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_FP64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries double x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp1st_FC32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC32_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp1st_FC64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC64_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UDT // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const 
GrB_BinaryOp op, // operator to apply to the entries const void *x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------- // matrix apply: binaryop variants (bind 2nd) //------------------------------------------- // Apply a binary operator to the entries in a matrix, binding the second input // to a scalar y, C<Mask> = accum (C, op (A,y)), or op(A',y). GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_Scalar // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; // historical: identical to GrB_Matrix_apply_BinaryOp2nd_Scalar GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp2nd // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_BOOL // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT8 // 
C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT16 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT8 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional 
accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT16 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A float y, // second input: scalar y 
const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A double y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UDT // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------- // matrix apply: IndexUnaryOp variants 
//------------------------------------------- // Apply a GrB_IndexUnaryOp to the entries in a matrix. GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_Scalar // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_BOOL // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT32 // C<M>=accum(C,op(A)) 
( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) 
const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_FP32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A float y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_FP64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A double y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_IndexOp_FC32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // 
descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_IndexOp_FC64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UDT // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------------------------------------------ // GrB_apply: generic matrix/vector apply //------------------------------------------------------------------------------ // GrB_apply is a generic function for applying a unary operator to a matrix // or vector and provides access to these functions: // GrB_Vector_apply (w,mask,acc,op,u,d) // w<mask> = accum (w, op(u)) // GrB_Matrix_apply (C,Mask,acc,op,A,d) // C<Mask> = accum (C, op(A)) // GrB_Vector_apply (w,m,acc,unop ,u,d) // GrB_Vector_apply_BinaryOp1st_TYPE (w,m,acc,binop,x,u,d) // GrB_Vector_apply_BinaryOp2nd_TYPE (w,m,acc,binop,u,y,d) // GrB_Vector_apply_IndexOp_TYPE (w,m,acc,idxop,u,y,d) // GrB_Matrix_apply (C,M,acc,unop ,A,d) // GrB_Matrix_apply_BinaryOp1st_TYPE (C,M,acc,binop,x,A,d) // GrB_Matrix_apply_BinaryOp2nd_TYPE (C,M,acc,binop,A,y,d) // GrB_Matrix_apply_IndexOp_TYPE (C,M,acc,idxop,A,y,d) #if GxB_STDC_VERSION >= 201112L #define GB_BIND(kind,x,y,...) 
\ _Generic \ ( \ (x), \ const GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \ GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \ GB_CASES (, GrB, GB_CONCAT ( kind, _apply_BinaryOp1st,, )) , \ default: \ _Generic \ ( \ (y), \ GB_CASES (, GrB, GB_CONCAT ( kind , _apply_BinaryOp2nd,, )), \ default: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp2nd_Scalar) \ ) \ ) #define GB_IDXOP(kind,A,y,...) \ _Generic \ ( \ (y), \ GB_CASES (, GrB, GB_CONCAT ( kind, _apply_IndexOp,, )), \ default: GB_CONCAT ( GrB, _, kind, _apply_IndexOp_Scalar) \ ) #define GrB_apply(C,Mask,accum,op,...) \ _Generic \ ( \ (C), \ GrB_Vector : \ _Generic \ ( \ (op), \ GrB_UnaryOp : GrB_Vector_apply , \ GrB_BinaryOp : GB_BIND (Vector, __VA_ARGS__), \ GrB_IndexUnaryOp : GB_IDXOP (Vector, __VA_ARGS__) \ ), \ GrB_Matrix : \ _Generic \ ( \ (op), \ GrB_UnaryOp : GrB_Matrix_apply , \ GrB_BinaryOp : GB_BIND (Matrix, __VA_ARGS__), \ GrB_IndexUnaryOp : GB_IDXOP (Matrix, __VA_ARGS__) \ ) \ ) \ (C, Mask, accum, op, __VA_ARGS__) #endif //============================================================================== // GrB_select: matrix and vector selection using an IndexUnaryOp //============================================================================== //------------------------------------------- // vector select using an IndexUnaryOp //------------------------------------------- GB_PUBLIC GrB_Info GrB_Vector_select_Scalar // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_BOOL // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // 
optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: 
vector u int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_FP32 // w<mask> = accum (w, op(u)) ( GrB_Vector 
w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u float y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_FP64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u double y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_select_FC32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_select_FC64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UDT // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // 
operator to apply to the entries const GrB_Vector u, // first input: vector u const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // matrix select using an IndexUnaryOp //------------------------------------------- GB_PUBLIC GrB_Info GrB_Matrix_select_Scalar // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_BOOL // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: 
matrix A int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // 
input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_FP32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A float y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_FP64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A double y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_select_FC32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the 
entries const GrB_Matrix A, // first input: matrix A GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_select_FC64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UDT // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; // GrB_select is a generic method that applies an IndexUnaryOp to // a matrix or vector, using any type of the scalar y. // GrB_Vector_select_TYPE (w,m,acc,idxop,u,y,d) // GrB_Matrix_select_TYPE (C,M,acc,idxop,A,y,d) #if GxB_STDC_VERSION >= 201112L #define GrB_select(C,Mask,accum,op,x,y,d) \ _Generic \ ( \ (C), \ GrB_Vector : \ _Generic \ ( \ (y), \ GB_CASES (, GrB, Vector_select), \ default: GrB_Vector_select_Scalar \ ), \ GrB_Matrix : \ _Generic \ ( \ (y), \ GB_CASES (, GrB, Matrix_select), \ default: GrB_Matrix_select_Scalar \ ) \ ) \ (C, Mask, accum, op, x, y, d) #endif //============================================================================== // GxB_select: matrix and vector selection (historical) //============================================================================== // GrB_select and with the GrB_IndexUnaryOp operators should be used instead. 
GB_PUBLIC GrB_Info GxB_Vector_select          // w<mask> = accum (w, op(u,k))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GxB_SelectOp op,          // operator to apply to the entries
    const GrB_Vector u,             // first input: vector u
    const GrB_Scalar Thunk,         // optional input for the select operator
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC GrB_Info GxB_Matrix_select          // C<Mask> = accum (C, op(A,k)) or op(A',k)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GxB_SelectOp op,          // operator to apply to the entries
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Scalar Thunk,         // optional input for the select operator
    const GrB_Descriptor desc       // descriptor for C, mask, and A
) ;

// NOTE(review): historical generic macro (see the "(historical)" section
// title above); it dispatches on the static type of C, selecting
// GxB_Vector_select for a GrB_Vector and GxB_Matrix_select for a GrB_Matrix.
// New code should use GrB_select with a GrB_IndexUnaryOp instead.
// Requires C11 _Generic, hence the GxB_STDC_VERSION guard.
#if GxB_STDC_VERSION >= 201112L
#define GxB_select(C,Mask,accum,op,A,Thunk,desc)    \
    _Generic                                        \
    (                                               \
        (C),                                        \
            GrB_Vector : GxB_Vector_select ,        \
            GrB_Matrix : GxB_Matrix_select          \
    )                                               \
    (C, Mask, accum, op, A, Thunk, desc)
#endif

//==============================================================================
// GrB_reduce: matrix and vector reduction
//==============================================================================

// Reduce the entries in a matrix to a vector, a column vector t such that
// t(i) = sum (A (i,:)), and where "sum" is a commutative and associative
// monoid with an identity value.  A can be transposed, which reduces down the
// columns instead of the rows.
// For GrB_Matrix_reduce_BinaryOp, the GrB_BinaryOp op must correspond to a // known built-in monoid: // // operator data-types (all built-in) // ---------------------- --------------------------- // MIN, MAX INT*, UINT*, FP* // TIMES, PLUS INT*, UINT*, FP*, FC* // ANY INT*, UINT*, FP*, FC*, BOOL // LOR, LAND, LXOR, EQ BOOL // BOR, BAND, BXOR, BXNOR UINT* GB_PUBLIC GrB_Info GrB_Matrix_reduce_Monoid // w<mask> = accum (w,reduce(A)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Monoid monoid, // reduce operator for t=reduce(A) const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc // descriptor for w, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_BinaryOp // w<mask> = accum (w,reduce(A)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // reduce operator for t=reduce(A) const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc // descriptor for w, mask, and A ) ; //------------------------------------------------------------------------------ // reduce a vector to a scalar //------------------------------------------------------------------------------ // Reduce entries in a vector to a scalar, c = accum (c, reduce_to_scalar(u)) GB_PUBLIC GrB_Info GrB_Vector_reduce_BOOL // c = accum (c, reduce_to_scalar (u)) ( bool *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT8 // c = accum (c, reduce_to_scalar (u)) ( int8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const 
GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT8 // c = accum (c, reduce_to_scalar (u)) ( uint8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT16 // c = accum (c, reduce_to_scalar (u)) ( int16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT16 // c = accum (c, reduce_to_scalar (u)) ( uint16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT32 // c = accum (c, reduce_to_scalar (u)) ( int32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT32 // c = accum (c, reduce_to_scalar (u)) ( uint32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT64 // c = accum (c, reduce_to_scalar (u)) ( int64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT64 // c = accum (c, reduce_to_scalar (u)) ( uint64_t *c, // result scalar const GrB_BinaryOp 
accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_FP32 // c = accum (c, reduce_to_scalar (u)) ( float *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_FP64 // c = accum (c, reduce_to_scalar (u)) ( double *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_reduce_FC32 // c = accum (c, reduce_to_scalar (u)) ( GxB_FC32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_reduce_FC64 // c = accum (c, reduce_to_scalar (u)) ( GxB_FC64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UDT // c = accum (c, reduce_to_scalar (u)) ( void *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(u)) ( GrB_Scalar c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info 
GrB_Vector_reduce_BinaryOp_Scalar ( GrB_Scalar c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_BinaryOp op, // binary op to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; //------------------------------------------------------------------------------ // reduce a matrix to a scalar //------------------------------------------------------------------------------ // Reduce entries in a matrix to a scalar, c = accum (c, reduce_to_scalar(A)) GB_PUBLIC GrB_Info GrB_Matrix_reduce_BOOL // c = accum (c, reduce_to_scalar (A)) ( bool *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT8 // c = accum (c, reduce_to_scalar (A)) ( int8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT8 // c = accum (c, reduce_to_scalar (A)) ( uint8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT16 // c = accum (c, reduce_to_scalar (A)) ( int16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT16 // c = accum (c, reduce_to_scalar (A)) ( uint16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const 
GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT32 // c = accum (c, reduce_to_scalar (A)) ( int32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT32 // c = accum (c, reduce_to_scalar (A)) ( uint32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT64 // c = accum (c, reduce_to_scalar (A)) ( int64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT64 // c = accum (c, reduce_to_scalar (A)) ( uint64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_FP32 // c = accum (c, reduce_to_scalar (A)) ( float *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_FP64 // c = accum (c, reduce_to_scalar (A)) ( double *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_reduce_FC32 // c = accum (c, reduce_to_scalar (A)) ( GxB_FC32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const 
GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_reduce_FC64 // c = accum (c, reduce_to_scalar (A)) ( GxB_FC64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UDT // c = accum (c, reduce_to_scalar (A)) ( void *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(A)) ( GrB_Scalar c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_BinaryOp_Scalar ( GrB_Scalar S, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_BinaryOp op, // binary op to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; //------------------------------------------------------------------------------ // GrB_reduce: generic matrix/vector reduction to a vector or scalar //------------------------------------------------------------------------------ // GrB_reduce is a generic function that provides access to all GrB_*reduce* // functions: // reduce matrix to vector: // GrB_Matrix_reduce_Monoid (w,mask,acc,mo,A,d) // w<mask> = acc (w,reduce(A)) // GrB_Matrix_reduce_BinaryOp (w,mask,acc,op,A,d) // w<mask> = acc (w,reduce(A)) // reduce matrix to scalar: // GrB_Vector_reduce_[SCALAR] (c,acc,monoid,u,d) // c = acc (c,reduce(u)) // GrB_Matrix_reduce_[SCALAR] (c,acc,monoid,A,d) // c = acc (c,reduce(A)) // 
GrB_Vector_reduce_Monoid_Scalar (s,acc,monoid,u,d) // s = acc (s,reduce(u)) // GrB_Matrix_reduce_Monoid_Scalar (s,acc,monoid,A,d) // s = acc (s,reduce(A)) // GrB_Vector_reduce_BinaryOp_Scalar (s,acc,op,u,d) // s = acc (s,reduce(u)) // GrB_Matrix_reduce_BinaryOp_Scalar (s,acc,op,A,d) // s = acc (s,reduce(A)) #if GxB_STDC_VERSION >= 201112L #define GB_REDUCE_TO_SCALAR(kind,c,op) \ _Generic \ ( \ (c), \ GB_CASES (*, GrB, GB_CONCAT ( kind, _reduce,, )), \ default: \ _Generic \ ( \ (op), \ const GrB_BinaryOp : \ GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\ GrB_BinaryOp : \ GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\ default: GB_CONCAT (GrB,_,kind,_reduce_Monoid_Scalar) \ ) \ ) #define GrB_reduce(arg1,arg2,arg3,arg4,...) \ _Generic \ ( \ (arg4), \ const GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \ GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \ const GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \ GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \ const GrB_Monoid : GrB_Matrix_reduce_Monoid , \ GrB_Monoid : GrB_Matrix_reduce_Monoid , \ const GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp , \ GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp \ ) \ (arg1, arg2, arg3, arg4, __VA_ARGS__) #endif //============================================================================== // GrB_transpose: matrix transpose //============================================================================== GB_PUBLIC GrB_Info GrB_transpose // C<Mask> = accum (C, A') ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc // descriptor for C, Mask, and A ) ; //============================================================================== // GrB_kronecker: Kronecker product //============================================================================== // GxB_kron is historical; 
// use GrB_kronecker instead

GB_PUBLIC GrB_Info GxB_kron                 // C<Mask> = accum(C,kron(A,B)) (historical)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_BinaryOp op,          // defines '*' for T=kron(A,B)
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, Mask, A, and B
) ;

GB_PUBLIC GrB_Info GrB_Matrix_kronecker_BinaryOp    // C<M> = accum (C, kron(A,B))
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix M,             // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_BinaryOp op,          // defines '*' for T=kron(A,B)
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, M, A, and B
) ;

GB_PUBLIC GrB_Info GrB_Matrix_kronecker_Monoid  // C<M> = accum (C, kron(A,B))
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix M,             // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Monoid monoid,        // defines '*' for T=kron(A,B)
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, M, A, and B
) ;

GB_PUBLIC GrB_Info GrB_Matrix_kronecker_Semiring    // C<M> = accum (C, kron(A,B))
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix M,             // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Semiring semiring,    // defines '*' for T=kron(A,B)
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Matrix B,             // second input: matrix B
    const GrB_Descriptor desc       // descriptor for C, M, A, and B
) ;

// GrB_kronecker: C11 type-generic dispatch on the operator argument (semiring,
// monoid, or binary op), selecting the matching GrB_Matrix_kronecker_* method.
#if GxB_STDC_VERSION >= 201112L
#define GrB_kronecker(C,Mask,accum,op,A,B,desc)                     \
    _Generic                                                        \
    (                                                               \
        (op),                                                       \
        const GrB_Semiring : GrB_Matrix_kronecker_Semiring ,        \
              GrB_Semiring : GrB_Matrix_kronecker_Semiring ,        \
        const GrB_Monoid   : GrB_Matrix_kronecker_Monoid   ,        \
              GrB_Monoid   : GrB_Matrix_kronecker_Monoid   ,        \
        const GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp ,        \
              GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp          \
    )                                                               \
    (C, Mask, accum, op, A, B, desc)
#endif

//==============================================================================
// GrB_Monoid: built-in monoids
//==============================================================================

// A monoid is an associative binary operator z=op(x,y) where all three types
// are the same, together with an identity value; "terminal" marks a value at
// which the reduction can stop early (short-circuit), where applicable.

GB_PUBLIC GrB_Monoid

    //--------------------------------------------------------------------------
    // 10 MIN monoids: (not for complex types)
    //--------------------------------------------------------------------------

    // GxB_MIN monoids, historical, use GrB_MIN_MONOID_* instead:
    GxB_MIN_INT8_MONOID,          // identity: INT8_MAX     terminal: INT8_MIN
    GxB_MIN_INT16_MONOID,         // identity: INT16_MAX    terminal: INT16_MIN
    GxB_MIN_INT32_MONOID,         // identity: INT32_MAX    terminal: INT32_MIN
    GxB_MIN_INT64_MONOID,         // identity: INT64_MAX    terminal: INT64_MIN
                                  // (comment fixed: originally said INT32_MIN)
    GxB_MIN_UINT8_MONOID,         // identity: UINT8_MAX    terminal: 0
    GxB_MIN_UINT16_MONOID,        // identity: UINT16_MAX   terminal: 0
    GxB_MIN_UINT32_MONOID,        // identity: UINT32_MAX   terminal: 0
    GxB_MIN_UINT64_MONOID,        // identity: UINT64_MAX   terminal: 0
    GxB_MIN_FP32_MONOID,          // identity: INFINITY     terminal: -INFINITY
    GxB_MIN_FP64_MONOID,          // identity: INFINITY     terminal: -INFINITY

    // preferred names from the v1.3 spec:
    GrB_MIN_MONOID_INT8,          // identity: INT8_MAX     terminal: INT8_MIN
    GrB_MIN_MONOID_INT16,         // identity: INT16_MAX    terminal: INT16_MIN
    GrB_MIN_MONOID_INT32,         // identity: INT32_MAX    terminal: INT32_MIN
    GrB_MIN_MONOID_INT64,         // identity: INT64_MAX    terminal: INT64_MIN
                                  // (comment fixed: originally said INT32_MIN)
    GrB_MIN_MONOID_UINT8,         // identity: UINT8_MAX    terminal: 0
    GrB_MIN_MONOID_UINT16,        // identity: UINT16_MAX   terminal: 0
    GrB_MIN_MONOID_UINT32,        // identity: UINT32_MAX   terminal: 0
    GrB_MIN_MONOID_UINT64,        // identity: UINT64_MAX   terminal: 0
    GrB_MIN_MONOID_FP32,          // identity: INFINITY     terminal: -INFINITY
    GrB_MIN_MONOID_FP64,          // identity: INFINITY     terminal: -INFINITY

    //--------------------------------------------------------------------------
    // 10 MAX monoids:
    //--------------------------------------------------------------------------

    // GxB_MAX monoids, historical, use GrB_MAX_MONOID_* instead:
    GxB_MAX_INT8_MONOID,          // identity: INT8_MIN     terminal: INT8_MAX
    GxB_MAX_INT16_MONOID,         // identity: INT16_MIN    terminal: INT16_MAX
    GxB_MAX_INT32_MONOID,         // identity: INT32_MIN    terminal: INT32_MAX
    GxB_MAX_INT64_MONOID,         // identity: INT64_MIN    terminal: INT64_MAX
    GxB_MAX_UINT8_MONOID,         // identity: 0            terminal: UINT8_MAX
    GxB_MAX_UINT16_MONOID,        // identity: 0            terminal: UINT16_MAX
    GxB_MAX_UINT32_MONOID,        // identity: 0            terminal: UINT32_MAX
    GxB_MAX_UINT64_MONOID,        // identity: 0            terminal: UINT64_MAX
    GxB_MAX_FP32_MONOID,          // identity: -INFINITY    terminal: INFINITY
    GxB_MAX_FP64_MONOID,          // identity: -INFINITY    terminal: INFINITY

    // preferred names from the v1.3 spec:
    GrB_MAX_MONOID_INT8,          // identity: INT8_MIN     terminal: INT8_MAX
    GrB_MAX_MONOID_INT16,         // identity: INT16_MIN    terminal: INT16_MAX
    GrB_MAX_MONOID_INT32,         // identity: INT32_MIN    terminal: INT32_MAX
    GrB_MAX_MONOID_INT64,         // identity: INT64_MIN    terminal: INT64_MAX
    GrB_MAX_MONOID_UINT8,         // identity: 0            terminal: UINT8_MAX
    GrB_MAX_MONOID_UINT16,        // identity: 0            terminal: UINT16_MAX
    GrB_MAX_MONOID_UINT32,        // identity: 0            terminal: UINT32_MAX
    GrB_MAX_MONOID_UINT64,        // identity: 0            terminal: UINT64_MAX
    GrB_MAX_MONOID_FP32,          // identity: -INFINITY    terminal: INFINITY
    GrB_MAX_MONOID_FP64,          // identity: -INFINITY    terminal: INFINITY

    //--------------------------------------------------------------------------
    // 12 PLUS monoids:
    //--------------------------------------------------------------------------

    // GxB_PLUS monoids, historical, use GrB_PLUS_MONOID_* instead:
    GxB_PLUS_INT8_MONOID,         // identity: 0
    GxB_PLUS_INT16_MONOID,        // identity: 0
    GxB_PLUS_INT32_MONOID,        // identity: 0
    GxB_PLUS_INT64_MONOID,        // identity: 0
    GxB_PLUS_UINT8_MONOID,        // identity: 0
    GxB_PLUS_UINT16_MONOID,       // identity: 0
    GxB_PLUS_UINT32_MONOID,       // identity: 0
    GxB_PLUS_UINT64_MONOID,       // identity: 0
    GxB_PLUS_FP32_MONOID,         // identity: 0
    GxB_PLUS_FP64_MONOID,         // identity: 0

    // preferred names from the v1.3 spec:
    GrB_PLUS_MONOID_INT8,         // identity: 0
    GrB_PLUS_MONOID_INT16,        // identity: 0
    GrB_PLUS_MONOID_INT32,        // identity: 0
    GrB_PLUS_MONOID_INT64,        // identity: 0
    GrB_PLUS_MONOID_UINT8,        // identity: 0
    GrB_PLUS_MONOID_UINT16,       // identity: 0
    GrB_PLUS_MONOID_UINT32,       // identity: 0
    GrB_PLUS_MONOID_UINT64,       // identity: 0
    GrB_PLUS_MONOID_FP32,         // identity: 0
    GrB_PLUS_MONOID_FP64,         // identity: 0

    // complex monoids:
    GxB_PLUS_FC32_MONOID,         // identity: 0
    GxB_PLUS_FC64_MONOID,         // identity: 0

    //--------------------------------------------------------------------------
    // 12 TIMES monoids: identity value is 1, int* and uint* are terminal
    //--------------------------------------------------------------------------

    // GxB_TIMES monoids, historical, use GrB_TIMES_MONOID_* instead:
    GxB_TIMES_INT8_MONOID,        // identity: 1            terminal: 0
    GxB_TIMES_INT16_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_INT32_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_INT64_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_UINT8_MONOID,       // identity: 1            terminal: 0
    GxB_TIMES_UINT16_MONOID,      // identity: 1            terminal: 0
    GxB_TIMES_UINT32_MONOID,      // identity: 1            terminal: 0
    GxB_TIMES_UINT64_MONOID,      // identity: 1            terminal: 0
    GxB_TIMES_FP32_MONOID,        // identity: 1
    GxB_TIMES_FP64_MONOID,        // identity: 1

    // preferred names from the v1.3 spec:
    GrB_TIMES_MONOID_INT8,        // identity: 1            terminal: 0
    GrB_TIMES_MONOID_INT16,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_INT32,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_INT64,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT8,       // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT16,      // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT32,      // identity: 1            terminal: 0
    GrB_TIMES_MONOID_UINT64,      // identity: 1            terminal: 0
    GrB_TIMES_MONOID_FP32,        // identity: 1
    GrB_TIMES_MONOID_FP64,        // identity: 1

    // complex monoids:
    GxB_TIMES_FC32_MONOID,        // identity: 1
    GxB_TIMES_FC64_MONOID,        // identity: 1

    //--------------------------------------------------------------------------
    // 13 ANY monoids:
    //--------------------------------------------------------------------------

    GxB_ANY_BOOL_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_INT8_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_INT16_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_INT32_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_INT64_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_UINT8_MONOID,         // identity: any value    terminal: any value
    GxB_ANY_UINT16_MONOID,        // identity: any value    terminal: any value
    GxB_ANY_UINT32_MONOID,        // identity: any value    terminal: any value
    GxB_ANY_UINT64_MONOID,        // identity: any value    terminal: any value
    GxB_ANY_FP32_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_FP64_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_FC32_MONOID,          // identity: any value    terminal: any value
    GxB_ANY_FC64_MONOID,          // identity: any value    terminal: any value

    //--------------------------------------------------------------------------
    // 4 Boolean monoids: (see also the GxB_ANY_BOOL_MONOID above)
    //--------------------------------------------------------------------------

    // GxB_* boolean monoids, historical, use GrB_* instead:
    GxB_LOR_BOOL_MONOID,          // identity: false        terminal: true
    GxB_LAND_BOOL_MONOID,         // identity: true         terminal: false
    GxB_LXOR_BOOL_MONOID,         // identity: false
    GxB_LXNOR_BOOL_MONOID,        // identity: true
    GxB_EQ_BOOL_MONOID,           // (alternative name for GrB_LXNOR_MONOID_BOOL)

    // preferred names from the v1.3 spec:
    GrB_LOR_MONOID_BOOL,          // identity: false        terminal: true
    GrB_LAND_MONOID_BOOL,         // identity: true         terminal: false
    GrB_LXOR_MONOID_BOOL,         // identity: false
    GrB_LXNOR_MONOID_BOOL,        // identity: true

    //--------------------------------------------------------------------------
    // 16 Bitwise-or monoids:
    //--------------------------------------------------------------------------

    // BOR monoids (bitwise or):
    GxB_BOR_UINT8_MONOID,         // identity: 0            terminal: 0xFF
    GxB_BOR_UINT16_MONOID,        // identity: 0            terminal: 0xFFFF
    GxB_BOR_UINT32_MONOID,        // identity: 0            terminal: 0xFFFFFFFF
    GxB_BOR_UINT64_MONOID,        // identity: 0            terminal: 0xFFFFFFFFFFFFFFFF

    // BAND monoids (bitwise and):
    GxB_BAND_UINT8_MONOID,        // identity: 0xFF         terminal: 0
    GxB_BAND_UINT16_MONOID,       // identity: 0xFFFF       terminal: 0
    GxB_BAND_UINT32_MONOID,       // identity: 0xFFFFFFFF   terminal: 0
    GxB_BAND_UINT64_MONOID,       // identity: 0xFFFFFFFFFFFFFFFF terminal: 0

    // BXOR monoids (bitwise xor):
    GxB_BXOR_UINT8_MONOID,        // identity: 0
    GxB_BXOR_UINT16_MONOID,       // identity: 0
    GxB_BXOR_UINT32_MONOID,       // identity: 0
    GxB_BXOR_UINT64_MONOID,       // identity: 0

    // BXNOR monoids (bitwise xnor):
    GxB_BXNOR_UINT8_MONOID,       // identity: 0xFF
    GxB_BXNOR_UINT16_MONOID,      // identity: 0xFFFF
    GxB_BXNOR_UINT32_MONOID,      // identity: 0xFFFFFFFF
    GxB_BXNOR_UINT64_MONOID ;     // identity: 0xFFFFFFFFFFFFFFFF

//==============================================================================
// GrB_Semiring: built-in semirings
//==============================================================================

// Using built-in types and operators, SuiteSparse:GraphBLAS provides
// 1553 pre-defined, built-in semirings:

// 1000 semirings with a multiply operator TxT -> T where T is non-Boolean,
// from the complete cross product of:

//      5 monoids: MIN, MAX, PLUS, TIMES, ANY
//      20 multiply operators:
//          FIRST, SECOND, PAIR (=ONEB), MIN, MAX, PLUS, MINUS, TIMES, DIV,
//          RDIV, RMINUS
//          ISEQ, ISNE, ISGT, ISLT, ISGE, ISLE,
//          LOR, LAND, LXOR
//      10 non-Boolean real types, T
//
//      Note that min_pair, max_pair, times_pair are all identical to any_pair.
//      These 30 semirings are named below, but are internally remapped to
//      their corresponding any_pair semiring.
// 300 semirings with a comparator TxT -> bool, where T is // non-Boolean, from the complete cross product of: // 5 Boolean monoids: LAND, LOR, LXOR, EQ (=LXNOR), ANY // 6 multiply operators: EQ, NE, GT, LT, GE, LE // 10 non-Boolean real types, T // 55 semirings with purely Boolean types, bool x bool -> bool, from the // complete cross product of: // 5 Boolean monoids LAND, LOR, LXOR, EQ (=LXNOR), ANY // 11 multiply operators: // FIRST, SECOND, LOR, LAND, LXOR, EQ (=LXNOR), GT, LT, GE, LE, // PAIR (=ONEB) // // Note that lor_pair, land_pair, and eq_pair are all identical to // any_pair. These 3 semirings are named below, but are internally // remapped to any_pair_bool semiring. // 54 complex semirings: TxT -> T where T is float complex or double complex: // 3 complex monoids: PLUS, TIMES, ANY // 9 complex multiply operators: // FIRST, SECOND, PAIR (=ONEB), PLUS, MINUS, TIMES, DIV, RDIV, RMINUS // 2 complex types // // Note that times_pair is identical to any_pair. // These 2 semirings are named below, but are internally remapped to // their corresponding any_pair semiring. // 64 bitwise semirings: TxT -> T where T is an unsigned integer: // 4 bitwise monoids: BOR, BAND, BXOR, BXNOR // 4 bitwise multiply operators: BOR, BAND, BXOR, BXNOR // 4 unsigned integer types: UINT8, UINT16, UINT32, UINT64 // 80 positional semirings: XxX -> T where T is int64 or int32, and the type of // X is ignored: // 5 monoids: MIN, MAX, PLUS, TIMES, ANY // 8 multiply operators: // FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1, // SECONDI, SECONDI1, SECONDJ, SECONDJ1 // 2 types: int32, int64 // The ANY operator is also valid to use as a multiplicative operator in a // semiring, but serves no purpose in that case. The ANY operator is meant as // a fast additive operator for a monoid, that terminates, or short-circuits, // as soon as any value is found. A valid user semiring can be constructed // with ANY as the multiply operator, but they are not predefined below. 
// Likewise, additional built-in operators can be used as multiplicative
// operators for floating-point semirings (POW, ATAN2, HYPOT, ...) and many
// more semirings can be constructed from bitwise monoids and many integer
// binary (non-bitwise) multiplicative operators, but these are not
// pre-defined.

// In the names below, each semiring has a name of the form GxB_add_mult_T
// where add is the additive monoid, mult is the multiply operator, and T is
// the type.  The type T is always the type of x and y for the z=mult(x,y)
// operator.  The monoid's three types and the ztype of the mult operator are
// always the same.  This is the type T for the first set, and Boolean for
// the second and third sets of semirings.

// 1553 = 1000 + 300 + 55 + 54 + 64 + 80 semirings are named below, but 35 = 30
// + 3 + 2 are identical to the corresponding any_pair semirings of the same
// type.  For positional semirings, the multiply ops FIRSTJ and SECONDI are
// identical, as are FIRSTJ1 and SECONDI1.  These semirings still appear as
// predefined, for convenience.
GB_PUBLIC GrB_Semiring //------------------------------------------------------------------------------ // 1000 non-Boolean semirings where all types are the same, given by suffix _T //------------------------------------------------------------------------------ // semirings with multiply op: z = FIRST (x,y), all types x,y,z the same: GxB_MIN_FIRST_INT8 , GxB_MAX_FIRST_INT8 , GxB_PLUS_FIRST_INT8 , GxB_TIMES_FIRST_INT8 , GxB_ANY_FIRST_INT8 , GxB_MIN_FIRST_INT16 , GxB_MAX_FIRST_INT16 , GxB_PLUS_FIRST_INT16 , GxB_TIMES_FIRST_INT16 , GxB_ANY_FIRST_INT16 , GxB_MIN_FIRST_INT32 , GxB_MAX_FIRST_INT32 , GxB_PLUS_FIRST_INT32 , GxB_TIMES_FIRST_INT32 , GxB_ANY_FIRST_INT32 , GxB_MIN_FIRST_INT64 , GxB_MAX_FIRST_INT64 , GxB_PLUS_FIRST_INT64 , GxB_TIMES_FIRST_INT64 , GxB_ANY_FIRST_INT64 , GxB_MIN_FIRST_UINT8 , GxB_MAX_FIRST_UINT8 , GxB_PLUS_FIRST_UINT8 , GxB_TIMES_FIRST_UINT8 , GxB_ANY_FIRST_UINT8 , GxB_MIN_FIRST_UINT16 , GxB_MAX_FIRST_UINT16 , GxB_PLUS_FIRST_UINT16 , GxB_TIMES_FIRST_UINT16 , GxB_ANY_FIRST_UINT16 , GxB_MIN_FIRST_UINT32 , GxB_MAX_FIRST_UINT32 , GxB_PLUS_FIRST_UINT32 , GxB_TIMES_FIRST_UINT32 , GxB_ANY_FIRST_UINT32 , GxB_MIN_FIRST_UINT64 , GxB_MAX_FIRST_UINT64 , GxB_PLUS_FIRST_UINT64 , GxB_TIMES_FIRST_UINT64 , GxB_ANY_FIRST_UINT64 , GxB_MIN_FIRST_FP32 , GxB_MAX_FIRST_FP32 , GxB_PLUS_FIRST_FP32 , GxB_TIMES_FIRST_FP32 , GxB_ANY_FIRST_FP32 , GxB_MIN_FIRST_FP64 , GxB_MAX_FIRST_FP64 , GxB_PLUS_FIRST_FP64 , GxB_TIMES_FIRST_FP64 , GxB_ANY_FIRST_FP64 , // semirings with multiply op: z = SECOND (x,y), all types x,y,z the same: GxB_MIN_SECOND_INT8 , GxB_MAX_SECOND_INT8 , GxB_PLUS_SECOND_INT8 , GxB_TIMES_SECOND_INT8 , GxB_ANY_SECOND_INT8 , GxB_MIN_SECOND_INT16 , GxB_MAX_SECOND_INT16 , GxB_PLUS_SECOND_INT16 , GxB_TIMES_SECOND_INT16 , GxB_ANY_SECOND_INT16 , GxB_MIN_SECOND_INT32 , GxB_MAX_SECOND_INT32 , GxB_PLUS_SECOND_INT32 , GxB_TIMES_SECOND_INT32 , GxB_ANY_SECOND_INT32 , GxB_MIN_SECOND_INT64 , GxB_MAX_SECOND_INT64 , GxB_PLUS_SECOND_INT64 , GxB_TIMES_SECOND_INT64 , 
GxB_ANY_SECOND_INT64 , GxB_MIN_SECOND_UINT8 , GxB_MAX_SECOND_UINT8 , GxB_PLUS_SECOND_UINT8 , GxB_TIMES_SECOND_UINT8 , GxB_ANY_SECOND_UINT8 , GxB_MIN_SECOND_UINT16 , GxB_MAX_SECOND_UINT16 , GxB_PLUS_SECOND_UINT16 , GxB_TIMES_SECOND_UINT16, GxB_ANY_SECOND_UINT16 , GxB_MIN_SECOND_UINT32 , GxB_MAX_SECOND_UINT32 , GxB_PLUS_SECOND_UINT32 , GxB_TIMES_SECOND_UINT32, GxB_ANY_SECOND_UINT32 , GxB_MIN_SECOND_UINT64 , GxB_MAX_SECOND_UINT64 , GxB_PLUS_SECOND_UINT64 , GxB_TIMES_SECOND_UINT64, GxB_ANY_SECOND_UINT64 , GxB_MIN_SECOND_FP32 , GxB_MAX_SECOND_FP32 , GxB_PLUS_SECOND_FP32 , GxB_TIMES_SECOND_FP32 , GxB_ANY_SECOND_FP32 , GxB_MIN_SECOND_FP64 , GxB_MAX_SECOND_FP64 , GxB_PLUS_SECOND_FP64 , GxB_TIMES_SECOND_FP64 , GxB_ANY_SECOND_FP64 , // semirings with multiply op: z = PAIR (x,y), all types x,y,z the same: // (note that min_pair, max_pair, times_pair are all identical to any_pair, and are marked below) GxB_MIN_PAIR_INT8 /**/, GxB_MAX_PAIR_INT8 /**/, GxB_PLUS_PAIR_INT8 , GxB_TIMES_PAIR_INT8 /**/, GxB_ANY_PAIR_INT8 , GxB_MIN_PAIR_INT16 /**/, GxB_MAX_PAIR_INT16 /**/, GxB_PLUS_PAIR_INT16 , GxB_TIMES_PAIR_INT16 /**/, GxB_ANY_PAIR_INT16 , GxB_MIN_PAIR_INT32 /**/, GxB_MAX_PAIR_INT32 /**/, GxB_PLUS_PAIR_INT32 , GxB_TIMES_PAIR_INT32 /**/, GxB_ANY_PAIR_INT32 , GxB_MIN_PAIR_INT64 /**/, GxB_MAX_PAIR_INT64 /**/, GxB_PLUS_PAIR_INT64 , GxB_TIMES_PAIR_INT64 /**/, GxB_ANY_PAIR_INT64 , GxB_MIN_PAIR_UINT8 /**/, GxB_MAX_PAIR_UINT8 /**/, GxB_PLUS_PAIR_UINT8 , GxB_TIMES_PAIR_UINT8 /**/, GxB_ANY_PAIR_UINT8 , GxB_MIN_PAIR_UINT16/**/, GxB_MAX_PAIR_UINT16/**/, GxB_PLUS_PAIR_UINT16 , GxB_TIMES_PAIR_UINT16/**/, GxB_ANY_PAIR_UINT16 , GxB_MIN_PAIR_UINT32/**/, GxB_MAX_PAIR_UINT32/**/, GxB_PLUS_PAIR_UINT32 , GxB_TIMES_PAIR_UINT32/**/, GxB_ANY_PAIR_UINT32 , GxB_MIN_PAIR_UINT64/**/, GxB_MAX_PAIR_UINT64/**/, GxB_PLUS_PAIR_UINT64 , GxB_TIMES_PAIR_UINT64/**/, GxB_ANY_PAIR_UINT64 , GxB_MIN_PAIR_FP32 /**/, GxB_MAX_PAIR_FP32 /**/, GxB_PLUS_PAIR_FP32 , GxB_TIMES_PAIR_FP32 /**/, GxB_ANY_PAIR_FP32 , GxB_MIN_PAIR_FP64 
/**/, GxB_MAX_PAIR_FP64 /**/, GxB_PLUS_PAIR_FP64 , GxB_TIMES_PAIR_FP64 /**/, GxB_ANY_PAIR_FP64 , // semirings with multiply op: z = MIN (x,y), all types x,y,z the same: GxB_MIN_MIN_INT8 , GxB_MAX_MIN_INT8 , GxB_PLUS_MIN_INT8 , GxB_TIMES_MIN_INT8 , GxB_ANY_MIN_INT8 , GxB_MIN_MIN_INT16 , GxB_MAX_MIN_INT16 , GxB_PLUS_MIN_INT16 , GxB_TIMES_MIN_INT16 , GxB_ANY_MIN_INT16 , GxB_MIN_MIN_INT32 , GxB_MAX_MIN_INT32 , GxB_PLUS_MIN_INT32 , GxB_TIMES_MIN_INT32 , GxB_ANY_MIN_INT32 , GxB_MIN_MIN_INT64 , GxB_MAX_MIN_INT64 , GxB_PLUS_MIN_INT64 , GxB_TIMES_MIN_INT64 , GxB_ANY_MIN_INT64 , GxB_MIN_MIN_UINT8 , GxB_MAX_MIN_UINT8 , GxB_PLUS_MIN_UINT8 , GxB_TIMES_MIN_UINT8 , GxB_ANY_MIN_UINT8 , GxB_MIN_MIN_UINT16 , GxB_MAX_MIN_UINT16 , GxB_PLUS_MIN_UINT16 , GxB_TIMES_MIN_UINT16 , GxB_ANY_MIN_UINT16 , GxB_MIN_MIN_UINT32 , GxB_MAX_MIN_UINT32 , GxB_PLUS_MIN_UINT32 , GxB_TIMES_MIN_UINT32 , GxB_ANY_MIN_UINT32 , GxB_MIN_MIN_UINT64 , GxB_MAX_MIN_UINT64 , GxB_PLUS_MIN_UINT64 , GxB_TIMES_MIN_UINT64 , GxB_ANY_MIN_UINT64 , GxB_MIN_MIN_FP32 , GxB_MAX_MIN_FP32 , GxB_PLUS_MIN_FP32 , GxB_TIMES_MIN_FP32 , GxB_ANY_MIN_FP32 , GxB_MIN_MIN_FP64 , GxB_MAX_MIN_FP64 , GxB_PLUS_MIN_FP64 , GxB_TIMES_MIN_FP64 , GxB_ANY_MIN_FP64 , // semirings with multiply op: z = MAX (x,y), all types x,y,z the same: GxB_MIN_MAX_INT8 , GxB_MAX_MAX_INT8 , GxB_PLUS_MAX_INT8 , GxB_TIMES_MAX_INT8 , GxB_ANY_MAX_INT8 , GxB_MIN_MAX_INT16 , GxB_MAX_MAX_INT16 , GxB_PLUS_MAX_INT16 , GxB_TIMES_MAX_INT16 , GxB_ANY_MAX_INT16 , GxB_MIN_MAX_INT32 , GxB_MAX_MAX_INT32 , GxB_PLUS_MAX_INT32 , GxB_TIMES_MAX_INT32 , GxB_ANY_MAX_INT32 , GxB_MIN_MAX_INT64 , GxB_MAX_MAX_INT64 , GxB_PLUS_MAX_INT64 , GxB_TIMES_MAX_INT64 , GxB_ANY_MAX_INT64 , GxB_MIN_MAX_UINT8 , GxB_MAX_MAX_UINT8 , GxB_PLUS_MAX_UINT8 , GxB_TIMES_MAX_UINT8 , GxB_ANY_MAX_UINT8 , GxB_MIN_MAX_UINT16 , GxB_MAX_MAX_UINT16 , GxB_PLUS_MAX_UINT16 , GxB_TIMES_MAX_UINT16 , GxB_ANY_MAX_UINT16 , GxB_MIN_MAX_UINT32 , GxB_MAX_MAX_UINT32 , GxB_PLUS_MAX_UINT32 , GxB_TIMES_MAX_UINT32 , GxB_ANY_MAX_UINT32 , 
GxB_MIN_MAX_UINT64 , GxB_MAX_MAX_UINT64 , GxB_PLUS_MAX_UINT64 , GxB_TIMES_MAX_UINT64 , GxB_ANY_MAX_UINT64 , GxB_MIN_MAX_FP32 , GxB_MAX_MAX_FP32 , GxB_PLUS_MAX_FP32 , GxB_TIMES_MAX_FP32 , GxB_ANY_MAX_FP32 , GxB_MIN_MAX_FP64 , GxB_MAX_MAX_FP64 , GxB_PLUS_MAX_FP64 , GxB_TIMES_MAX_FP64 , GxB_ANY_MAX_FP64 , // semirings with multiply op: z = PLUS (x,y), all types x,y,z the same: GxB_MIN_PLUS_INT8 , GxB_MAX_PLUS_INT8 , GxB_PLUS_PLUS_INT8 , GxB_TIMES_PLUS_INT8 , GxB_ANY_PLUS_INT8 , GxB_MIN_PLUS_INT16 , GxB_MAX_PLUS_INT16 , GxB_PLUS_PLUS_INT16 , GxB_TIMES_PLUS_INT16 , GxB_ANY_PLUS_INT16 , GxB_MIN_PLUS_INT32 , GxB_MAX_PLUS_INT32 , GxB_PLUS_PLUS_INT32 , GxB_TIMES_PLUS_INT32 , GxB_ANY_PLUS_INT32 , GxB_MIN_PLUS_INT64 , GxB_MAX_PLUS_INT64 , GxB_PLUS_PLUS_INT64 , GxB_TIMES_PLUS_INT64 , GxB_ANY_PLUS_INT64 , GxB_MIN_PLUS_UINT8 , GxB_MAX_PLUS_UINT8 , GxB_PLUS_PLUS_UINT8 , GxB_TIMES_PLUS_UINT8 , GxB_ANY_PLUS_UINT8 , GxB_MIN_PLUS_UINT16 , GxB_MAX_PLUS_UINT16 , GxB_PLUS_PLUS_UINT16 , GxB_TIMES_PLUS_UINT16 , GxB_ANY_PLUS_UINT16 , GxB_MIN_PLUS_UINT32 , GxB_MAX_PLUS_UINT32 , GxB_PLUS_PLUS_UINT32 , GxB_TIMES_PLUS_UINT32 , GxB_ANY_PLUS_UINT32 , GxB_MIN_PLUS_UINT64 , GxB_MAX_PLUS_UINT64 , GxB_PLUS_PLUS_UINT64 , GxB_TIMES_PLUS_UINT64 , GxB_ANY_PLUS_UINT64 , GxB_MIN_PLUS_FP32 , GxB_MAX_PLUS_FP32 , GxB_PLUS_PLUS_FP32 , GxB_TIMES_PLUS_FP32 , GxB_ANY_PLUS_FP32 , GxB_MIN_PLUS_FP64 , GxB_MAX_PLUS_FP64 , GxB_PLUS_PLUS_FP64 , GxB_TIMES_PLUS_FP64 , GxB_ANY_PLUS_FP64 , // semirings with multiply op: z = MINUS (x,y), all types x,y,z the same: GxB_MIN_MINUS_INT8 , GxB_MAX_MINUS_INT8 , GxB_PLUS_MINUS_INT8 , GxB_TIMES_MINUS_INT8 , GxB_ANY_MINUS_INT8 , GxB_MIN_MINUS_INT16 , GxB_MAX_MINUS_INT16 , GxB_PLUS_MINUS_INT16 , GxB_TIMES_MINUS_INT16 , GxB_ANY_MINUS_INT16 , GxB_MIN_MINUS_INT32 , GxB_MAX_MINUS_INT32 , GxB_PLUS_MINUS_INT32 , GxB_TIMES_MINUS_INT32 , GxB_ANY_MINUS_INT32 , GxB_MIN_MINUS_INT64 , GxB_MAX_MINUS_INT64 , GxB_PLUS_MINUS_INT64 , GxB_TIMES_MINUS_INT64 , GxB_ANY_MINUS_INT64 , GxB_MIN_MINUS_UINT8 , 
GxB_MAX_MINUS_UINT8 , GxB_PLUS_MINUS_UINT8 , GxB_TIMES_MINUS_UINT8 , GxB_ANY_MINUS_UINT8 , GxB_MIN_MINUS_UINT16 , GxB_MAX_MINUS_UINT16 , GxB_PLUS_MINUS_UINT16 , GxB_TIMES_MINUS_UINT16 , GxB_ANY_MINUS_UINT16 , GxB_MIN_MINUS_UINT32 , GxB_MAX_MINUS_UINT32 , GxB_PLUS_MINUS_UINT32 , GxB_TIMES_MINUS_UINT32 , GxB_ANY_MINUS_UINT32 , GxB_MIN_MINUS_UINT64 , GxB_MAX_MINUS_UINT64 , GxB_PLUS_MINUS_UINT64 , GxB_TIMES_MINUS_UINT64 , GxB_ANY_MINUS_UINT64 , GxB_MIN_MINUS_FP32 , GxB_MAX_MINUS_FP32 , GxB_PLUS_MINUS_FP32 , GxB_TIMES_MINUS_FP32 , GxB_ANY_MINUS_FP32 , GxB_MIN_MINUS_FP64 , GxB_MAX_MINUS_FP64 , GxB_PLUS_MINUS_FP64 , GxB_TIMES_MINUS_FP64 , GxB_ANY_MINUS_FP64 , // semirings with multiply op: z = TIMES (x,y), all types x,y,z the same: GxB_MIN_TIMES_INT8 , GxB_MAX_TIMES_INT8 , GxB_PLUS_TIMES_INT8 , GxB_TIMES_TIMES_INT8 , GxB_ANY_TIMES_INT8 , GxB_MIN_TIMES_INT16 , GxB_MAX_TIMES_INT16 , GxB_PLUS_TIMES_INT16 , GxB_TIMES_TIMES_INT16 , GxB_ANY_TIMES_INT16 , GxB_MIN_TIMES_INT32 , GxB_MAX_TIMES_INT32 , GxB_PLUS_TIMES_INT32 , GxB_TIMES_TIMES_INT32 , GxB_ANY_TIMES_INT32 , GxB_MIN_TIMES_INT64 , GxB_MAX_TIMES_INT64 , GxB_PLUS_TIMES_INT64 , GxB_TIMES_TIMES_INT64 , GxB_ANY_TIMES_INT64 , GxB_MIN_TIMES_UINT8 , GxB_MAX_TIMES_UINT8 , GxB_PLUS_TIMES_UINT8 , GxB_TIMES_TIMES_UINT8 , GxB_ANY_TIMES_UINT8 , GxB_MIN_TIMES_UINT16 , GxB_MAX_TIMES_UINT16 , GxB_PLUS_TIMES_UINT16 , GxB_TIMES_TIMES_UINT16 , GxB_ANY_TIMES_UINT16 , GxB_MIN_TIMES_UINT32 , GxB_MAX_TIMES_UINT32 , GxB_PLUS_TIMES_UINT32 , GxB_TIMES_TIMES_UINT32 , GxB_ANY_TIMES_UINT32 , GxB_MIN_TIMES_UINT64 , GxB_MAX_TIMES_UINT64 , GxB_PLUS_TIMES_UINT64 , GxB_TIMES_TIMES_UINT64 , GxB_ANY_TIMES_UINT64 , GxB_MIN_TIMES_FP32 , GxB_MAX_TIMES_FP32 , GxB_PLUS_TIMES_FP32 , GxB_TIMES_TIMES_FP32 , GxB_ANY_TIMES_FP32 , GxB_MIN_TIMES_FP64 , GxB_MAX_TIMES_FP64 , GxB_PLUS_TIMES_FP64 , GxB_TIMES_TIMES_FP64 , GxB_ANY_TIMES_FP64 , // semirings with multiply op: z = DIV (x,y), all types x,y,z the same: GxB_MIN_DIV_INT8 , GxB_MAX_DIV_INT8 , GxB_PLUS_DIV_INT8 , 
GxB_TIMES_DIV_INT8 , GxB_ANY_DIV_INT8 , GxB_MIN_DIV_INT16 , GxB_MAX_DIV_INT16 , GxB_PLUS_DIV_INT16 , GxB_TIMES_DIV_INT16 , GxB_ANY_DIV_INT16 , GxB_MIN_DIV_INT32 , GxB_MAX_DIV_INT32 , GxB_PLUS_DIV_INT32 , GxB_TIMES_DIV_INT32 , GxB_ANY_DIV_INT32 , GxB_MIN_DIV_INT64 , GxB_MAX_DIV_INT64 , GxB_PLUS_DIV_INT64 , GxB_TIMES_DIV_INT64 , GxB_ANY_DIV_INT64 , GxB_MIN_DIV_UINT8 , GxB_MAX_DIV_UINT8 , GxB_PLUS_DIV_UINT8 , GxB_TIMES_DIV_UINT8 , GxB_ANY_DIV_UINT8 , GxB_MIN_DIV_UINT16 , GxB_MAX_DIV_UINT16 , GxB_PLUS_DIV_UINT16 , GxB_TIMES_DIV_UINT16 , GxB_ANY_DIV_UINT16 , GxB_MIN_DIV_UINT32 , GxB_MAX_DIV_UINT32 , GxB_PLUS_DIV_UINT32 , GxB_TIMES_DIV_UINT32 , GxB_ANY_DIV_UINT32 , GxB_MIN_DIV_UINT64 , GxB_MAX_DIV_UINT64 , GxB_PLUS_DIV_UINT64 , GxB_TIMES_DIV_UINT64 , GxB_ANY_DIV_UINT64 , GxB_MIN_DIV_FP32 , GxB_MAX_DIV_FP32 , GxB_PLUS_DIV_FP32 , GxB_TIMES_DIV_FP32 , GxB_ANY_DIV_FP32 , GxB_MIN_DIV_FP64 , GxB_MAX_DIV_FP64 , GxB_PLUS_DIV_FP64 , GxB_TIMES_DIV_FP64 , GxB_ANY_DIV_FP64 , // semirings with multiply op: z = RDIV (x,y), all types x,y,z the same: GxB_MIN_RDIV_INT8 , GxB_MAX_RDIV_INT8 , GxB_PLUS_RDIV_INT8 , GxB_TIMES_RDIV_INT8 , GxB_ANY_RDIV_INT8 , GxB_MIN_RDIV_INT16 , GxB_MAX_RDIV_INT16 , GxB_PLUS_RDIV_INT16 , GxB_TIMES_RDIV_INT16 , GxB_ANY_RDIV_INT16 , GxB_MIN_RDIV_INT32 , GxB_MAX_RDIV_INT32 , GxB_PLUS_RDIV_INT32 , GxB_TIMES_RDIV_INT32 , GxB_ANY_RDIV_INT32 , GxB_MIN_RDIV_INT64 , GxB_MAX_RDIV_INT64 , GxB_PLUS_RDIV_INT64 , GxB_TIMES_RDIV_INT64 , GxB_ANY_RDIV_INT64 , GxB_MIN_RDIV_UINT8 , GxB_MAX_RDIV_UINT8 , GxB_PLUS_RDIV_UINT8 , GxB_TIMES_RDIV_UINT8 , GxB_ANY_RDIV_UINT8 , GxB_MIN_RDIV_UINT16 , GxB_MAX_RDIV_UINT16 , GxB_PLUS_RDIV_UINT16 , GxB_TIMES_RDIV_UINT16 , GxB_ANY_RDIV_UINT16 , GxB_MIN_RDIV_UINT32 , GxB_MAX_RDIV_UINT32 , GxB_PLUS_RDIV_UINT32 , GxB_TIMES_RDIV_UINT32 , GxB_ANY_RDIV_UINT32 , GxB_MIN_RDIV_UINT64 , GxB_MAX_RDIV_UINT64 , GxB_PLUS_RDIV_UINT64 , GxB_TIMES_RDIV_UINT64 , GxB_ANY_RDIV_UINT64 , GxB_MIN_RDIV_FP32 , GxB_MAX_RDIV_FP32 , GxB_PLUS_RDIV_FP32 , GxB_TIMES_RDIV_FP32 
, GxB_ANY_RDIV_FP32 , GxB_MIN_RDIV_FP64 , GxB_MAX_RDIV_FP64 , GxB_PLUS_RDIV_FP64 , GxB_TIMES_RDIV_FP64 , GxB_ANY_RDIV_FP64 , // semirings with multiply op: z = RMINUS (x,y), all types x,y,z the same: GxB_MIN_RMINUS_INT8 , GxB_MAX_RMINUS_INT8 , GxB_PLUS_RMINUS_INT8 , GxB_TIMES_RMINUS_INT8 , GxB_ANY_RMINUS_INT8 , GxB_MIN_RMINUS_INT16 , GxB_MAX_RMINUS_INT16 , GxB_PLUS_RMINUS_INT16 , GxB_TIMES_RMINUS_INT16 , GxB_ANY_RMINUS_INT16 , GxB_MIN_RMINUS_INT32 , GxB_MAX_RMINUS_INT32 , GxB_PLUS_RMINUS_INT32 , GxB_TIMES_RMINUS_INT32 , GxB_ANY_RMINUS_INT32 , GxB_MIN_RMINUS_INT64 , GxB_MAX_RMINUS_INT64 , GxB_PLUS_RMINUS_INT64 , GxB_TIMES_RMINUS_INT64 , GxB_ANY_RMINUS_INT64 , GxB_MIN_RMINUS_UINT8 , GxB_MAX_RMINUS_UINT8 , GxB_PLUS_RMINUS_UINT8 , GxB_TIMES_RMINUS_UINT8 , GxB_ANY_RMINUS_UINT8 , GxB_MIN_RMINUS_UINT16 , GxB_MAX_RMINUS_UINT16 , GxB_PLUS_RMINUS_UINT16 , GxB_TIMES_RMINUS_UINT16, GxB_ANY_RMINUS_UINT16 , GxB_MIN_RMINUS_UINT32 , GxB_MAX_RMINUS_UINT32 , GxB_PLUS_RMINUS_UINT32 , GxB_TIMES_RMINUS_UINT32, GxB_ANY_RMINUS_UINT32 , GxB_MIN_RMINUS_UINT64 , GxB_MAX_RMINUS_UINT64 , GxB_PLUS_RMINUS_UINT64 , GxB_TIMES_RMINUS_UINT64, GxB_ANY_RMINUS_UINT64 , GxB_MIN_RMINUS_FP32 , GxB_MAX_RMINUS_FP32 , GxB_PLUS_RMINUS_FP32 , GxB_TIMES_RMINUS_FP32 , GxB_ANY_RMINUS_FP32 , GxB_MIN_RMINUS_FP64 , GxB_MAX_RMINUS_FP64 , GxB_PLUS_RMINUS_FP64 , GxB_TIMES_RMINUS_FP64 , GxB_ANY_RMINUS_FP64 , // semirings with multiply op: z = ISEQ (x,y), all types x,y,z the same: GxB_MIN_ISEQ_INT8 , GxB_MAX_ISEQ_INT8 , GxB_PLUS_ISEQ_INT8 , GxB_TIMES_ISEQ_INT8 , GxB_ANY_ISEQ_INT8 , GxB_MIN_ISEQ_INT16 , GxB_MAX_ISEQ_INT16 , GxB_PLUS_ISEQ_INT16 , GxB_TIMES_ISEQ_INT16 , GxB_ANY_ISEQ_INT16 , GxB_MIN_ISEQ_INT32 , GxB_MAX_ISEQ_INT32 , GxB_PLUS_ISEQ_INT32 , GxB_TIMES_ISEQ_INT32 , GxB_ANY_ISEQ_INT32 , GxB_MIN_ISEQ_INT64 , GxB_MAX_ISEQ_INT64 , GxB_PLUS_ISEQ_INT64 , GxB_TIMES_ISEQ_INT64 , GxB_ANY_ISEQ_INT64 , GxB_MIN_ISEQ_UINT8 , GxB_MAX_ISEQ_UINT8 , GxB_PLUS_ISEQ_UINT8 , GxB_TIMES_ISEQ_UINT8 , GxB_ANY_ISEQ_UINT8 , 
GxB_MIN_ISEQ_UINT16 , GxB_MAX_ISEQ_UINT16 , GxB_PLUS_ISEQ_UINT16 , GxB_TIMES_ISEQ_UINT16 , GxB_ANY_ISEQ_UINT16 , GxB_MIN_ISEQ_UINT32 , GxB_MAX_ISEQ_UINT32 , GxB_PLUS_ISEQ_UINT32 , GxB_TIMES_ISEQ_UINT32 , GxB_ANY_ISEQ_UINT32 , GxB_MIN_ISEQ_UINT64 , GxB_MAX_ISEQ_UINT64 , GxB_PLUS_ISEQ_UINT64 , GxB_TIMES_ISEQ_UINT64 , GxB_ANY_ISEQ_UINT64 , GxB_MIN_ISEQ_FP32 , GxB_MAX_ISEQ_FP32 , GxB_PLUS_ISEQ_FP32 , GxB_TIMES_ISEQ_FP32 , GxB_ANY_ISEQ_FP32 , GxB_MIN_ISEQ_FP64 , GxB_MAX_ISEQ_FP64 , GxB_PLUS_ISEQ_FP64 , GxB_TIMES_ISEQ_FP64 , GxB_ANY_ISEQ_FP64 , // semirings with multiply op: z = ISNE (x,y), all types x,y,z the same: GxB_MIN_ISNE_INT8 , GxB_MAX_ISNE_INT8 , GxB_PLUS_ISNE_INT8 , GxB_TIMES_ISNE_INT8 , GxB_ANY_ISNE_INT8 , GxB_MIN_ISNE_INT16 , GxB_MAX_ISNE_INT16 , GxB_PLUS_ISNE_INT16 , GxB_TIMES_ISNE_INT16 , GxB_ANY_ISNE_INT16 , GxB_MIN_ISNE_INT32 , GxB_MAX_ISNE_INT32 , GxB_PLUS_ISNE_INT32 , GxB_TIMES_ISNE_INT32 , GxB_ANY_ISNE_INT32 , GxB_MIN_ISNE_INT64 , GxB_MAX_ISNE_INT64 , GxB_PLUS_ISNE_INT64 , GxB_TIMES_ISNE_INT64 , GxB_ANY_ISNE_INT64 , GxB_MIN_ISNE_UINT8 , GxB_MAX_ISNE_UINT8 , GxB_PLUS_ISNE_UINT8 , GxB_TIMES_ISNE_UINT8 , GxB_ANY_ISNE_UINT8 , GxB_MIN_ISNE_UINT16 , GxB_MAX_ISNE_UINT16 , GxB_PLUS_ISNE_UINT16 , GxB_TIMES_ISNE_UINT16 , GxB_ANY_ISNE_UINT16 , GxB_MIN_ISNE_UINT32 , GxB_MAX_ISNE_UINT32 , GxB_PLUS_ISNE_UINT32 , GxB_TIMES_ISNE_UINT32 , GxB_ANY_ISNE_UINT32 , GxB_MIN_ISNE_UINT64 , GxB_MAX_ISNE_UINT64 , GxB_PLUS_ISNE_UINT64 , GxB_TIMES_ISNE_UINT64 , GxB_ANY_ISNE_UINT64 , GxB_MIN_ISNE_FP32 , GxB_MAX_ISNE_FP32 , GxB_PLUS_ISNE_FP32 , GxB_TIMES_ISNE_FP32 , GxB_ANY_ISNE_FP32 , GxB_MIN_ISNE_FP64 , GxB_MAX_ISNE_FP64 , GxB_PLUS_ISNE_FP64 , GxB_TIMES_ISNE_FP64 , GxB_ANY_ISNE_FP64 , // semirings with multiply op: z = ISGT (x,y), all types x,y,z the same: GxB_MIN_ISGT_INT8 , GxB_MAX_ISGT_INT8 , GxB_PLUS_ISGT_INT8 , GxB_TIMES_ISGT_INT8 , GxB_ANY_ISGT_INT8 , GxB_MIN_ISGT_INT16 , GxB_MAX_ISGT_INT16 , GxB_PLUS_ISGT_INT16 , GxB_TIMES_ISGT_INT16 , GxB_ANY_ISGT_INT16 , 
GxB_MIN_ISGT_INT32 , GxB_MAX_ISGT_INT32 , GxB_PLUS_ISGT_INT32 , GxB_TIMES_ISGT_INT32 , GxB_ANY_ISGT_INT32 , GxB_MIN_ISGT_INT64 , GxB_MAX_ISGT_INT64 , GxB_PLUS_ISGT_INT64 , GxB_TIMES_ISGT_INT64 , GxB_ANY_ISGT_INT64 , GxB_MIN_ISGT_UINT8 , GxB_MAX_ISGT_UINT8 , GxB_PLUS_ISGT_UINT8 , GxB_TIMES_ISGT_UINT8 , GxB_ANY_ISGT_UINT8 , GxB_MIN_ISGT_UINT16 , GxB_MAX_ISGT_UINT16 , GxB_PLUS_ISGT_UINT16 , GxB_TIMES_ISGT_UINT16 , GxB_ANY_ISGT_UINT16 , GxB_MIN_ISGT_UINT32 , GxB_MAX_ISGT_UINT32 , GxB_PLUS_ISGT_UINT32 , GxB_TIMES_ISGT_UINT32 , GxB_ANY_ISGT_UINT32 , GxB_MIN_ISGT_UINT64 , GxB_MAX_ISGT_UINT64 , GxB_PLUS_ISGT_UINT64 , GxB_TIMES_ISGT_UINT64 , GxB_ANY_ISGT_UINT64 , GxB_MIN_ISGT_FP32 , GxB_MAX_ISGT_FP32 , GxB_PLUS_ISGT_FP32 , GxB_TIMES_ISGT_FP32 , GxB_ANY_ISGT_FP32 , GxB_MIN_ISGT_FP64 , GxB_MAX_ISGT_FP64 , GxB_PLUS_ISGT_FP64 , GxB_TIMES_ISGT_FP64 , GxB_ANY_ISGT_FP64 , // semirings with multiply op: z = ISLT (x,y), all types x,y,z the same: GxB_MIN_ISLT_INT8 , GxB_MAX_ISLT_INT8 , GxB_PLUS_ISLT_INT8 , GxB_TIMES_ISLT_INT8 , GxB_ANY_ISLT_INT8 , GxB_MIN_ISLT_INT16 , GxB_MAX_ISLT_INT16 , GxB_PLUS_ISLT_INT16 , GxB_TIMES_ISLT_INT16 , GxB_ANY_ISLT_INT16 , GxB_MIN_ISLT_INT32 , GxB_MAX_ISLT_INT32 , GxB_PLUS_ISLT_INT32 , GxB_TIMES_ISLT_INT32 , GxB_ANY_ISLT_INT32 , GxB_MIN_ISLT_INT64 , GxB_MAX_ISLT_INT64 , GxB_PLUS_ISLT_INT64 , GxB_TIMES_ISLT_INT64 , GxB_ANY_ISLT_INT64 , GxB_MIN_ISLT_UINT8 , GxB_MAX_ISLT_UINT8 , GxB_PLUS_ISLT_UINT8 , GxB_TIMES_ISLT_UINT8 , GxB_ANY_ISLT_UINT8 , GxB_MIN_ISLT_UINT16 , GxB_MAX_ISLT_UINT16 , GxB_PLUS_ISLT_UINT16 , GxB_TIMES_ISLT_UINT16 , GxB_ANY_ISLT_UINT16 , GxB_MIN_ISLT_UINT32 , GxB_MAX_ISLT_UINT32 , GxB_PLUS_ISLT_UINT32 , GxB_TIMES_ISLT_UINT32 , GxB_ANY_ISLT_UINT32 , GxB_MIN_ISLT_UINT64 , GxB_MAX_ISLT_UINT64 , GxB_PLUS_ISLT_UINT64 , GxB_TIMES_ISLT_UINT64 , GxB_ANY_ISLT_UINT64 , GxB_MIN_ISLT_FP32 , GxB_MAX_ISLT_FP32 , GxB_PLUS_ISLT_FP32 , GxB_TIMES_ISLT_FP32 , GxB_ANY_ISLT_FP32 , GxB_MIN_ISLT_FP64 , GxB_MAX_ISLT_FP64 , GxB_PLUS_ISLT_FP64 , GxB_TIMES_ISLT_FP64 
, GxB_ANY_ISLT_FP64 , // semirings with multiply op: z = ISGE (x,y), all types x,y,z the same: GxB_MIN_ISGE_INT8 , GxB_MAX_ISGE_INT8 , GxB_PLUS_ISGE_INT8 , GxB_TIMES_ISGE_INT8 , GxB_ANY_ISGE_INT8 , GxB_MIN_ISGE_INT16 , GxB_MAX_ISGE_INT16 , GxB_PLUS_ISGE_INT16 , GxB_TIMES_ISGE_INT16 , GxB_ANY_ISGE_INT16 , GxB_MIN_ISGE_INT32 , GxB_MAX_ISGE_INT32 , GxB_PLUS_ISGE_INT32 , GxB_TIMES_ISGE_INT32 , GxB_ANY_ISGE_INT32 , GxB_MIN_ISGE_INT64 , GxB_MAX_ISGE_INT64 , GxB_PLUS_ISGE_INT64 , GxB_TIMES_ISGE_INT64 , GxB_ANY_ISGE_INT64 , GxB_MIN_ISGE_UINT8 , GxB_MAX_ISGE_UINT8 , GxB_PLUS_ISGE_UINT8 , GxB_TIMES_ISGE_UINT8 , GxB_ANY_ISGE_UINT8 , GxB_MIN_ISGE_UINT16 , GxB_MAX_ISGE_UINT16 , GxB_PLUS_ISGE_UINT16 , GxB_TIMES_ISGE_UINT16 , GxB_ANY_ISGE_UINT16 , GxB_MIN_ISGE_UINT32 , GxB_MAX_ISGE_UINT32 , GxB_PLUS_ISGE_UINT32 , GxB_TIMES_ISGE_UINT32 , GxB_ANY_ISGE_UINT32 , GxB_MIN_ISGE_UINT64 , GxB_MAX_ISGE_UINT64 , GxB_PLUS_ISGE_UINT64 , GxB_TIMES_ISGE_UINT64 , GxB_ANY_ISGE_UINT64 , GxB_MIN_ISGE_FP32 , GxB_MAX_ISGE_FP32 , GxB_PLUS_ISGE_FP32 , GxB_TIMES_ISGE_FP32 , GxB_ANY_ISGE_FP32 , GxB_MIN_ISGE_FP64 , GxB_MAX_ISGE_FP64 , GxB_PLUS_ISGE_FP64 , GxB_TIMES_ISGE_FP64 , GxB_ANY_ISGE_FP64 , // semirings with multiply op: z = ISLE (x,y), all types x,y,z the same: GxB_MIN_ISLE_INT8 , GxB_MAX_ISLE_INT8 , GxB_PLUS_ISLE_INT8 , GxB_TIMES_ISLE_INT8 , GxB_ANY_ISLE_INT8 , GxB_MIN_ISLE_INT16 , GxB_MAX_ISLE_INT16 , GxB_PLUS_ISLE_INT16 , GxB_TIMES_ISLE_INT16 , GxB_ANY_ISLE_INT16 , GxB_MIN_ISLE_INT32 , GxB_MAX_ISLE_INT32 , GxB_PLUS_ISLE_INT32 , GxB_TIMES_ISLE_INT32 , GxB_ANY_ISLE_INT32 , GxB_MIN_ISLE_INT64 , GxB_MAX_ISLE_INT64 , GxB_PLUS_ISLE_INT64 , GxB_TIMES_ISLE_INT64 , GxB_ANY_ISLE_INT64 , GxB_MIN_ISLE_UINT8 , GxB_MAX_ISLE_UINT8 , GxB_PLUS_ISLE_UINT8 , GxB_TIMES_ISLE_UINT8 , GxB_ANY_ISLE_UINT8 , GxB_MIN_ISLE_UINT16 , GxB_MAX_ISLE_UINT16 , GxB_PLUS_ISLE_UINT16 , GxB_TIMES_ISLE_UINT16 , GxB_ANY_ISLE_UINT16 , GxB_MIN_ISLE_UINT32 , GxB_MAX_ISLE_UINT32 , GxB_PLUS_ISLE_UINT32 , GxB_TIMES_ISLE_UINT32 , 
GxB_ANY_ISLE_UINT32 , GxB_MIN_ISLE_UINT64 , GxB_MAX_ISLE_UINT64 , GxB_PLUS_ISLE_UINT64 , GxB_TIMES_ISLE_UINT64 , GxB_ANY_ISLE_UINT64 , GxB_MIN_ISLE_FP32 , GxB_MAX_ISLE_FP32 , GxB_PLUS_ISLE_FP32 , GxB_TIMES_ISLE_FP32 , GxB_ANY_ISLE_FP32 , GxB_MIN_ISLE_FP64 , GxB_MAX_ISLE_FP64 , GxB_PLUS_ISLE_FP64 , GxB_TIMES_ISLE_FP64 , GxB_ANY_ISLE_FP64 , // semirings with multiply op: z = LOR (x,y), all types x,y,z the same: GxB_MIN_LOR_INT8 , GxB_MAX_LOR_INT8 , GxB_PLUS_LOR_INT8 , GxB_TIMES_LOR_INT8 , GxB_ANY_LOR_INT8 , GxB_MIN_LOR_INT16 , GxB_MAX_LOR_INT16 , GxB_PLUS_LOR_INT16 , GxB_TIMES_LOR_INT16 , GxB_ANY_LOR_INT16 , GxB_MIN_LOR_INT32 , GxB_MAX_LOR_INT32 , GxB_PLUS_LOR_INT32 , GxB_TIMES_LOR_INT32 , GxB_ANY_LOR_INT32 , GxB_MIN_LOR_INT64 , GxB_MAX_LOR_INT64 , GxB_PLUS_LOR_INT64 , GxB_TIMES_LOR_INT64 , GxB_ANY_LOR_INT64 , GxB_MIN_LOR_UINT8 , GxB_MAX_LOR_UINT8 , GxB_PLUS_LOR_UINT8 , GxB_TIMES_LOR_UINT8 , GxB_ANY_LOR_UINT8 , GxB_MIN_LOR_UINT16 , GxB_MAX_LOR_UINT16 , GxB_PLUS_LOR_UINT16 , GxB_TIMES_LOR_UINT16 , GxB_ANY_LOR_UINT16 , GxB_MIN_LOR_UINT32 , GxB_MAX_LOR_UINT32 , GxB_PLUS_LOR_UINT32 , GxB_TIMES_LOR_UINT32 , GxB_ANY_LOR_UINT32 , GxB_MIN_LOR_UINT64 , GxB_MAX_LOR_UINT64 , GxB_PLUS_LOR_UINT64 , GxB_TIMES_LOR_UINT64 , GxB_ANY_LOR_UINT64 , GxB_MIN_LOR_FP32 , GxB_MAX_LOR_FP32 , GxB_PLUS_LOR_FP32 , GxB_TIMES_LOR_FP32 , GxB_ANY_LOR_FP32 , GxB_MIN_LOR_FP64 , GxB_MAX_LOR_FP64 , GxB_PLUS_LOR_FP64 , GxB_TIMES_LOR_FP64 , GxB_ANY_LOR_FP64 , // semirings with multiply op: z = LAND (x,y), all types x,y,z the same: GxB_MIN_LAND_INT8 , GxB_MAX_LAND_INT8 , GxB_PLUS_LAND_INT8 , GxB_TIMES_LAND_INT8 , GxB_ANY_LAND_INT8 , GxB_MIN_LAND_INT16 , GxB_MAX_LAND_INT16 , GxB_PLUS_LAND_INT16 , GxB_TIMES_LAND_INT16 , GxB_ANY_LAND_INT16 , GxB_MIN_LAND_INT32 , GxB_MAX_LAND_INT32 , GxB_PLUS_LAND_INT32 , GxB_TIMES_LAND_INT32 , GxB_ANY_LAND_INT32 , GxB_MIN_LAND_INT64 , GxB_MAX_LAND_INT64 , GxB_PLUS_LAND_INT64 , GxB_TIMES_LAND_INT64 , GxB_ANY_LAND_INT64 , GxB_MIN_LAND_UINT8 , GxB_MAX_LAND_UINT8 , 
GxB_PLUS_LAND_UINT8 , GxB_TIMES_LAND_UINT8 , GxB_ANY_LAND_UINT8 , GxB_MIN_LAND_UINT16 , GxB_MAX_LAND_UINT16 , GxB_PLUS_LAND_UINT16 , GxB_TIMES_LAND_UINT16 , GxB_ANY_LAND_UINT16 , GxB_MIN_LAND_UINT32 , GxB_MAX_LAND_UINT32 , GxB_PLUS_LAND_UINT32 , GxB_TIMES_LAND_UINT32 , GxB_ANY_LAND_UINT32 , GxB_MIN_LAND_UINT64 , GxB_MAX_LAND_UINT64 , GxB_PLUS_LAND_UINT64 , GxB_TIMES_LAND_UINT64 , GxB_ANY_LAND_UINT64 , GxB_MIN_LAND_FP32 , GxB_MAX_LAND_FP32 , GxB_PLUS_LAND_FP32 , GxB_TIMES_LAND_FP32 , GxB_ANY_LAND_FP32 , GxB_MIN_LAND_FP64 , GxB_MAX_LAND_FP64 , GxB_PLUS_LAND_FP64 , GxB_TIMES_LAND_FP64 , GxB_ANY_LAND_FP64 , // semirings with multiply op: z = LXOR (x,y), all types x,y,z the same: GxB_MIN_LXOR_INT8 , GxB_MAX_LXOR_INT8 , GxB_PLUS_LXOR_INT8 , GxB_TIMES_LXOR_INT8 , GxB_ANY_LXOR_INT8 , GxB_MIN_LXOR_INT16 , GxB_MAX_LXOR_INT16 , GxB_PLUS_LXOR_INT16 , GxB_TIMES_LXOR_INT16 , GxB_ANY_LXOR_INT16 , GxB_MIN_LXOR_INT32 , GxB_MAX_LXOR_INT32 , GxB_PLUS_LXOR_INT32 , GxB_TIMES_LXOR_INT32 , GxB_ANY_LXOR_INT32 , GxB_MIN_LXOR_INT64 , GxB_MAX_LXOR_INT64 , GxB_PLUS_LXOR_INT64 , GxB_TIMES_LXOR_INT64 , GxB_ANY_LXOR_INT64 , GxB_MIN_LXOR_UINT8 , GxB_MAX_LXOR_UINT8 , GxB_PLUS_LXOR_UINT8 , GxB_TIMES_LXOR_UINT8 , GxB_ANY_LXOR_UINT8 , GxB_MIN_LXOR_UINT16 , GxB_MAX_LXOR_UINT16 , GxB_PLUS_LXOR_UINT16 , GxB_TIMES_LXOR_UINT16 , GxB_ANY_LXOR_UINT16 , GxB_MIN_LXOR_UINT32 , GxB_MAX_LXOR_UINT32 , GxB_PLUS_LXOR_UINT32 , GxB_TIMES_LXOR_UINT32 , GxB_ANY_LXOR_UINT32 , GxB_MIN_LXOR_UINT64 , GxB_MAX_LXOR_UINT64 , GxB_PLUS_LXOR_UINT64 , GxB_TIMES_LXOR_UINT64 , GxB_ANY_LXOR_UINT64 , GxB_MIN_LXOR_FP32 , GxB_MAX_LXOR_FP32 , GxB_PLUS_LXOR_FP32 , GxB_TIMES_LXOR_FP32 , GxB_ANY_LXOR_FP32 , GxB_MIN_LXOR_FP64 , GxB_MAX_LXOR_FP64 , GxB_PLUS_LXOR_FP64 , GxB_TIMES_LXOR_FP64 , GxB_ANY_LXOR_FP64 , //------------------------------------------------------------------------------ // 300 semirings with a comparator TxT -> bool, where T is non-Boolean //------------------------------------------------------------------------------ // 
In the 4th column the GxB_EQ_*_* semirings could also be called // GxB_LXNOR_*_*, since the EQ and LXNOR boolean operators are identical // but those names are not included. // semirings with multiply op: z = EQ (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_EQ_INT8 , GxB_LAND_EQ_INT8 , GxB_LXOR_EQ_INT8 , GxB_EQ_EQ_INT8 , GxB_ANY_EQ_INT8 , GxB_LOR_EQ_INT16 , GxB_LAND_EQ_INT16 , GxB_LXOR_EQ_INT16 , GxB_EQ_EQ_INT16 , GxB_ANY_EQ_INT16 , GxB_LOR_EQ_INT32 , GxB_LAND_EQ_INT32 , GxB_LXOR_EQ_INT32 , GxB_EQ_EQ_INT32 , GxB_ANY_EQ_INT32 , GxB_LOR_EQ_INT64 , GxB_LAND_EQ_INT64 , GxB_LXOR_EQ_INT64 , GxB_EQ_EQ_INT64 , GxB_ANY_EQ_INT64 , GxB_LOR_EQ_UINT8 , GxB_LAND_EQ_UINT8 , GxB_LXOR_EQ_UINT8 , GxB_EQ_EQ_UINT8 , GxB_ANY_EQ_UINT8 , GxB_LOR_EQ_UINT16 , GxB_LAND_EQ_UINT16 , GxB_LXOR_EQ_UINT16 , GxB_EQ_EQ_UINT16 , GxB_ANY_EQ_UINT16 , GxB_LOR_EQ_UINT32 , GxB_LAND_EQ_UINT32 , GxB_LXOR_EQ_UINT32 , GxB_EQ_EQ_UINT32 , GxB_ANY_EQ_UINT32 , GxB_LOR_EQ_UINT64 , GxB_LAND_EQ_UINT64 , GxB_LXOR_EQ_UINT64 , GxB_EQ_EQ_UINT64 , GxB_ANY_EQ_UINT64 , GxB_LOR_EQ_FP32 , GxB_LAND_EQ_FP32 , GxB_LXOR_EQ_FP32 , GxB_EQ_EQ_FP32 , GxB_ANY_EQ_FP32 , GxB_LOR_EQ_FP64 , GxB_LAND_EQ_FP64 , GxB_LXOR_EQ_FP64 , GxB_EQ_EQ_FP64 , GxB_ANY_EQ_FP64 , // semirings with multiply op: z = NE (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_NE_INT8 , GxB_LAND_NE_INT8 , GxB_LXOR_NE_INT8 , GxB_EQ_NE_INT8 , GxB_ANY_NE_INT8 , GxB_LOR_NE_INT16 , GxB_LAND_NE_INT16 , GxB_LXOR_NE_INT16 , GxB_EQ_NE_INT16 , GxB_ANY_NE_INT16 , GxB_LOR_NE_INT32 , GxB_LAND_NE_INT32 , GxB_LXOR_NE_INT32 , GxB_EQ_NE_INT32 , GxB_ANY_NE_INT32 , GxB_LOR_NE_INT64 , GxB_LAND_NE_INT64 , GxB_LXOR_NE_INT64 , GxB_EQ_NE_INT64 , GxB_ANY_NE_INT64 , GxB_LOR_NE_UINT8 , GxB_LAND_NE_UINT8 , GxB_LXOR_NE_UINT8 , GxB_EQ_NE_UINT8 , GxB_ANY_NE_UINT8 , GxB_LOR_NE_UINT16 , GxB_LAND_NE_UINT16 , GxB_LXOR_NE_UINT16 , GxB_EQ_NE_UINT16 , GxB_ANY_NE_UINT16 , GxB_LOR_NE_UINT32 , GxB_LAND_NE_UINT32 , GxB_LXOR_NE_UINT32 , GxB_EQ_NE_UINT32 , 
GxB_ANY_NE_UINT32 , GxB_LOR_NE_UINT64 , GxB_LAND_NE_UINT64 , GxB_LXOR_NE_UINT64 , GxB_EQ_NE_UINT64 , GxB_ANY_NE_UINT64 , GxB_LOR_NE_FP32 , GxB_LAND_NE_FP32 , GxB_LXOR_NE_FP32 , GxB_EQ_NE_FP32 , GxB_ANY_NE_FP32 , GxB_LOR_NE_FP64 , GxB_LAND_NE_FP64 , GxB_LXOR_NE_FP64 , GxB_EQ_NE_FP64 , GxB_ANY_NE_FP64 , // semirings with multiply op: z = GT (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_GT_INT8 , GxB_LAND_GT_INT8 , GxB_LXOR_GT_INT8 , GxB_EQ_GT_INT8 , GxB_ANY_GT_INT8 , GxB_LOR_GT_INT16 , GxB_LAND_GT_INT16 , GxB_LXOR_GT_INT16 , GxB_EQ_GT_INT16 , GxB_ANY_GT_INT16 , GxB_LOR_GT_INT32 , GxB_LAND_GT_INT32 , GxB_LXOR_GT_INT32 , GxB_EQ_GT_INT32 , GxB_ANY_GT_INT32 , GxB_LOR_GT_INT64 , GxB_LAND_GT_INT64 , GxB_LXOR_GT_INT64 , GxB_EQ_GT_INT64 , GxB_ANY_GT_INT64 , GxB_LOR_GT_UINT8 , GxB_LAND_GT_UINT8 , GxB_LXOR_GT_UINT8 , GxB_EQ_GT_UINT8 , GxB_ANY_GT_UINT8 , GxB_LOR_GT_UINT16 , GxB_LAND_GT_UINT16 , GxB_LXOR_GT_UINT16 , GxB_EQ_GT_UINT16 , GxB_ANY_GT_UINT16 , GxB_LOR_GT_UINT32 , GxB_LAND_GT_UINT32 , GxB_LXOR_GT_UINT32 , GxB_EQ_GT_UINT32 , GxB_ANY_GT_UINT32 , GxB_LOR_GT_UINT64 , GxB_LAND_GT_UINT64 , GxB_LXOR_GT_UINT64 , GxB_EQ_GT_UINT64 , GxB_ANY_GT_UINT64 , GxB_LOR_GT_FP32 , GxB_LAND_GT_FP32 , GxB_LXOR_GT_FP32 , GxB_EQ_GT_FP32 , GxB_ANY_GT_FP32 , GxB_LOR_GT_FP64 , GxB_LAND_GT_FP64 , GxB_LXOR_GT_FP64 , GxB_EQ_GT_FP64 , GxB_ANY_GT_FP64 , // semirings with multiply op: z = LT (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_LT_INT8 , GxB_LAND_LT_INT8 , GxB_LXOR_LT_INT8 , GxB_EQ_LT_INT8 , GxB_ANY_LT_INT8 , GxB_LOR_LT_INT16 , GxB_LAND_LT_INT16 , GxB_LXOR_LT_INT16 , GxB_EQ_LT_INT16 , GxB_ANY_LT_INT16 , GxB_LOR_LT_INT32 , GxB_LAND_LT_INT32 , GxB_LXOR_LT_INT32 , GxB_EQ_LT_INT32 , GxB_ANY_LT_INT32 , GxB_LOR_LT_INT64 , GxB_LAND_LT_INT64 , GxB_LXOR_LT_INT64 , GxB_EQ_LT_INT64 , GxB_ANY_LT_INT64 , GxB_LOR_LT_UINT8 , GxB_LAND_LT_UINT8 , GxB_LXOR_LT_UINT8 , GxB_EQ_LT_UINT8 , GxB_ANY_LT_UINT8 , GxB_LOR_LT_UINT16 , GxB_LAND_LT_UINT16 , GxB_LXOR_LT_UINT16 , 
GxB_EQ_LT_UINT16 , GxB_ANY_LT_UINT16 , GxB_LOR_LT_UINT32 , GxB_LAND_LT_UINT32 , GxB_LXOR_LT_UINT32 , GxB_EQ_LT_UINT32 , GxB_ANY_LT_UINT32 , GxB_LOR_LT_UINT64 , GxB_LAND_LT_UINT64 , GxB_LXOR_LT_UINT64 , GxB_EQ_LT_UINT64 , GxB_ANY_LT_UINT64 , GxB_LOR_LT_FP32 , GxB_LAND_LT_FP32 , GxB_LXOR_LT_FP32 , GxB_EQ_LT_FP32 , GxB_ANY_LT_FP32 , GxB_LOR_LT_FP64 , GxB_LAND_LT_FP64 , GxB_LXOR_LT_FP64 , GxB_EQ_LT_FP64 , GxB_ANY_LT_FP64 , // semirings with multiply op: z = GE (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_GE_INT8 , GxB_LAND_GE_INT8 , GxB_LXOR_GE_INT8 , GxB_EQ_GE_INT8 , GxB_ANY_GE_INT8 , GxB_LOR_GE_INT16 , GxB_LAND_GE_INT16 , GxB_LXOR_GE_INT16 , GxB_EQ_GE_INT16 , GxB_ANY_GE_INT16 , GxB_LOR_GE_INT32 , GxB_LAND_GE_INT32 , GxB_LXOR_GE_INT32 , GxB_EQ_GE_INT32 , GxB_ANY_GE_INT32 , GxB_LOR_GE_INT64 , GxB_LAND_GE_INT64 , GxB_LXOR_GE_INT64 , GxB_EQ_GE_INT64 , GxB_ANY_GE_INT64 , GxB_LOR_GE_UINT8 , GxB_LAND_GE_UINT8 , GxB_LXOR_GE_UINT8 , GxB_EQ_GE_UINT8 , GxB_ANY_GE_UINT8 , GxB_LOR_GE_UINT16 , GxB_LAND_GE_UINT16 , GxB_LXOR_GE_UINT16 , GxB_EQ_GE_UINT16 , GxB_ANY_GE_UINT16 , GxB_LOR_GE_UINT32 , GxB_LAND_GE_UINT32 , GxB_LXOR_GE_UINT32 , GxB_EQ_GE_UINT32 , GxB_ANY_GE_UINT32 , GxB_LOR_GE_UINT64 , GxB_LAND_GE_UINT64 , GxB_LXOR_GE_UINT64 , GxB_EQ_GE_UINT64 , GxB_ANY_GE_UINT64 , GxB_LOR_GE_FP32 , GxB_LAND_GE_FP32 , GxB_LXOR_GE_FP32 , GxB_EQ_GE_FP32 , GxB_ANY_GE_FP32 , GxB_LOR_GE_FP64 , GxB_LAND_GE_FP64 , GxB_LXOR_GE_FP64 , GxB_EQ_GE_FP64 , GxB_ANY_GE_FP64 , // semirings with multiply op: z = LE (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_LE_INT8 , GxB_LAND_LE_INT8 , GxB_LXOR_LE_INT8 , GxB_EQ_LE_INT8 , GxB_ANY_LE_INT8 , GxB_LOR_LE_INT16 , GxB_LAND_LE_INT16 , GxB_LXOR_LE_INT16 , GxB_EQ_LE_INT16 , GxB_ANY_LE_INT16 , GxB_LOR_LE_INT32 , GxB_LAND_LE_INT32 , GxB_LXOR_LE_INT32 , GxB_EQ_LE_INT32 , GxB_ANY_LE_INT32 , GxB_LOR_LE_INT64 , GxB_LAND_LE_INT64 , GxB_LXOR_LE_INT64 , GxB_EQ_LE_INT64 , GxB_ANY_LE_INT64 , GxB_LOR_LE_UINT8 , GxB_LAND_LE_UINT8 , 
GxB_LXOR_LE_UINT8 , GxB_EQ_LE_UINT8 , GxB_ANY_LE_UINT8 , GxB_LOR_LE_UINT16 , GxB_LAND_LE_UINT16 , GxB_LXOR_LE_UINT16 , GxB_EQ_LE_UINT16 , GxB_ANY_LE_UINT16 , GxB_LOR_LE_UINT32 , GxB_LAND_LE_UINT32 , GxB_LXOR_LE_UINT32 , GxB_EQ_LE_UINT32 , GxB_ANY_LE_UINT32 , GxB_LOR_LE_UINT64 , GxB_LAND_LE_UINT64 , GxB_LXOR_LE_UINT64 , GxB_EQ_LE_UINT64 , GxB_ANY_LE_UINT64 , GxB_LOR_LE_FP32 , GxB_LAND_LE_FP32 , GxB_LXOR_LE_FP32 , GxB_EQ_LE_FP32 , GxB_ANY_LE_FP32 , GxB_LOR_LE_FP64 , GxB_LAND_LE_FP64 , GxB_LXOR_LE_FP64 , GxB_EQ_LE_FP64 , GxB_ANY_LE_FP64 , //------------------------------------------------------------------------------ // 55 semirings with purely Boolean types, bool x bool -> bool //------------------------------------------------------------------------------ // Note that lor_pair, land_pair, and eq_pair are all identical to any_pair. // These 3 are marked below. GxB_EQ_*_BOOL could be called // GxB_LXNOR_*_BOOL, and GxB_*_EQ_BOOL could be called GxB_*_LXNOR_BOOL, // but those names are not included. 
// purely boolean semirings in the form GxB_(add monoid)_(multiply operator)_BOOL: GxB_LOR_FIRST_BOOL , GxB_LAND_FIRST_BOOL , GxB_LXOR_FIRST_BOOL , GxB_EQ_FIRST_BOOL , GxB_ANY_FIRST_BOOL , GxB_LOR_SECOND_BOOL , GxB_LAND_SECOND_BOOL , GxB_LXOR_SECOND_BOOL , GxB_EQ_SECOND_BOOL , GxB_ANY_SECOND_BOOL , GxB_LOR_PAIR_BOOL/**/ , GxB_LAND_PAIR_BOOL/**/ , GxB_LXOR_PAIR_BOOL , GxB_EQ_PAIR_BOOL/**/ , GxB_ANY_PAIR_BOOL , GxB_LOR_LOR_BOOL , GxB_LAND_LOR_BOOL , GxB_LXOR_LOR_BOOL , GxB_EQ_LOR_BOOL , GxB_ANY_LOR_BOOL , GxB_LOR_LAND_BOOL , GxB_LAND_LAND_BOOL , GxB_LXOR_LAND_BOOL , GxB_EQ_LAND_BOOL , GxB_ANY_LAND_BOOL , GxB_LOR_LXOR_BOOL , GxB_LAND_LXOR_BOOL , GxB_LXOR_LXOR_BOOL , GxB_EQ_LXOR_BOOL , GxB_ANY_LXOR_BOOL , GxB_LOR_EQ_BOOL , GxB_LAND_EQ_BOOL , GxB_LXOR_EQ_BOOL , GxB_EQ_EQ_BOOL , GxB_ANY_EQ_BOOL , GxB_LOR_GT_BOOL , GxB_LAND_GT_BOOL , GxB_LXOR_GT_BOOL , GxB_EQ_GT_BOOL , GxB_ANY_GT_BOOL , GxB_LOR_LT_BOOL , GxB_LAND_LT_BOOL , GxB_LXOR_LT_BOOL , GxB_EQ_LT_BOOL , GxB_ANY_LT_BOOL , GxB_LOR_GE_BOOL , GxB_LAND_GE_BOOL , GxB_LXOR_GE_BOOL , GxB_EQ_GE_BOOL , GxB_ANY_GE_BOOL , GxB_LOR_LE_BOOL , GxB_LAND_LE_BOOL , GxB_LXOR_LE_BOOL , GxB_EQ_LE_BOOL , GxB_ANY_LE_BOOL , //------------------------------------------------------------------------------ // 54 complex semirings //------------------------------------------------------------------------------ // 3 monoids (plus, times, any), 2 types (FC32 and FC64), and 9 // multiplicative operators. // Note that times_pair is identical to any_pair. // These 2 are marked below. 
GxB_PLUS_FIRST_FC32 , GxB_TIMES_FIRST_FC32 , GxB_ANY_FIRST_FC32 , GxB_PLUS_FIRST_FC64 , GxB_TIMES_FIRST_FC64 , GxB_ANY_FIRST_FC64 , GxB_PLUS_SECOND_FC32 , GxB_TIMES_SECOND_FC32 , GxB_ANY_SECOND_FC32 , GxB_PLUS_SECOND_FC64 , GxB_TIMES_SECOND_FC64 , GxB_ANY_SECOND_FC64 , GxB_PLUS_PAIR_FC32 , GxB_TIMES_PAIR_FC32/**/, GxB_ANY_PAIR_FC32 , GxB_PLUS_PAIR_FC64 , GxB_TIMES_PAIR_FC64/**/, GxB_ANY_PAIR_FC64 , GxB_PLUS_PLUS_FC32 , GxB_TIMES_PLUS_FC32 , GxB_ANY_PLUS_FC32 , GxB_PLUS_PLUS_FC64 , GxB_TIMES_PLUS_FC64 , GxB_ANY_PLUS_FC64 , GxB_PLUS_MINUS_FC32 , GxB_TIMES_MINUS_FC32 , GxB_ANY_MINUS_FC32 , GxB_PLUS_MINUS_FC64 , GxB_TIMES_MINUS_FC64 , GxB_ANY_MINUS_FC64 , GxB_PLUS_TIMES_FC32 , GxB_TIMES_TIMES_FC32 , GxB_ANY_TIMES_FC32 , GxB_PLUS_TIMES_FC64 , GxB_TIMES_TIMES_FC64 , GxB_ANY_TIMES_FC64 , GxB_PLUS_DIV_FC32 , GxB_TIMES_DIV_FC32 , GxB_ANY_DIV_FC32 , GxB_PLUS_DIV_FC64 , GxB_TIMES_DIV_FC64 , GxB_ANY_DIV_FC64 , GxB_PLUS_RDIV_FC32 , GxB_TIMES_RDIV_FC32 , GxB_ANY_RDIV_FC32 , GxB_PLUS_RDIV_FC64 , GxB_TIMES_RDIV_FC64 , GxB_ANY_RDIV_FC64 , GxB_PLUS_RMINUS_FC32 , GxB_TIMES_RMINUS_FC32 , GxB_ANY_RMINUS_FC32 , GxB_PLUS_RMINUS_FC64 , GxB_TIMES_RMINUS_FC64 , GxB_ANY_RMINUS_FC64 , //------------------------------------------------------------------------------ // 64 bitwise semirings //------------------------------------------------------------------------------ // monoids: (BOR, BAND, BXOR, BXNOR) x // mult: (BOR, BAND, BXOR, BXNOR) x // types: (UINT8, UINT16, UINT32, UINT64) GxB_BOR_BOR_UINT8 , GxB_BOR_BOR_UINT16 , GxB_BOR_BOR_UINT32 , GxB_BOR_BOR_UINT64 , GxB_BOR_BAND_UINT8 , GxB_BOR_BAND_UINT16 , GxB_BOR_BAND_UINT32 , GxB_BOR_BAND_UINT64 , GxB_BOR_BXOR_UINT8 , GxB_BOR_BXOR_UINT16 , GxB_BOR_BXOR_UINT32 , GxB_BOR_BXOR_UINT64 , GxB_BOR_BXNOR_UINT8 , GxB_BOR_BXNOR_UINT16 , GxB_BOR_BXNOR_UINT32 , GxB_BOR_BXNOR_UINT64 , GxB_BAND_BOR_UINT8 , GxB_BAND_BOR_UINT16 , GxB_BAND_BOR_UINT32 , GxB_BAND_BOR_UINT64 , GxB_BAND_BAND_UINT8 , GxB_BAND_BAND_UINT16 , GxB_BAND_BAND_UINT32 , 
GxB_BAND_BAND_UINT64 , GxB_BAND_BXOR_UINT8 , GxB_BAND_BXOR_UINT16 , GxB_BAND_BXOR_UINT32 , GxB_BAND_BXOR_UINT64 , GxB_BAND_BXNOR_UINT8 , GxB_BAND_BXNOR_UINT16 , GxB_BAND_BXNOR_UINT32 , GxB_BAND_BXNOR_UINT64 , GxB_BXOR_BOR_UINT8 , GxB_BXOR_BOR_UINT16 , GxB_BXOR_BOR_UINT32 , GxB_BXOR_BOR_UINT64 , GxB_BXOR_BAND_UINT8 , GxB_BXOR_BAND_UINT16 , GxB_BXOR_BAND_UINT32 , GxB_BXOR_BAND_UINT64 , GxB_BXOR_BXOR_UINT8 , GxB_BXOR_BXOR_UINT16 , GxB_BXOR_BXOR_UINT32 , GxB_BXOR_BXOR_UINT64 , GxB_BXOR_BXNOR_UINT8 , GxB_BXOR_BXNOR_UINT16 , GxB_BXOR_BXNOR_UINT32 , GxB_BXOR_BXNOR_UINT64 , GxB_BXNOR_BOR_UINT8 , GxB_BXNOR_BOR_UINT16 , GxB_BXNOR_BOR_UINT32 , GxB_BXNOR_BOR_UINT64 , GxB_BXNOR_BAND_UINT8 , GxB_BXNOR_BAND_UINT16 , GxB_BXNOR_BAND_UINT32 , GxB_BXNOR_BAND_UINT64 , GxB_BXNOR_BXOR_UINT8 , GxB_BXNOR_BXOR_UINT16 , GxB_BXNOR_BXOR_UINT32 , GxB_BXNOR_BXOR_UINT64 , GxB_BXNOR_BXNOR_UINT8 , GxB_BXNOR_BXNOR_UINT16 , GxB_BXNOR_BXNOR_UINT32 , GxB_BXNOR_BXNOR_UINT64 , //------------------------------------------------------------------------------ // 80 positional semirings //------------------------------------------------------------------------------ // monoids: (MIN, MAX, ANY, PLUS, TIMES) x // mult: (FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1, SECONDI, SECONDI1, SECONDJ, SECONDJ1) // types: (INT32, INT64) GxB_MIN_FIRSTI_INT32, GxB_MIN_FIRSTI_INT64, GxB_MAX_FIRSTI_INT32, GxB_MAX_FIRSTI_INT64, GxB_ANY_FIRSTI_INT32, GxB_ANY_FIRSTI_INT64, GxB_PLUS_FIRSTI_INT32, GxB_PLUS_FIRSTI_INT64, GxB_TIMES_FIRSTI_INT32, GxB_TIMES_FIRSTI_INT64, GxB_MIN_FIRSTI1_INT32, GxB_MIN_FIRSTI1_INT64, GxB_MAX_FIRSTI1_INT32, GxB_MAX_FIRSTI1_INT64, GxB_ANY_FIRSTI1_INT32, GxB_ANY_FIRSTI1_INT64, GxB_PLUS_FIRSTI1_INT32, GxB_PLUS_FIRSTI1_INT64, GxB_TIMES_FIRSTI1_INT32, GxB_TIMES_FIRSTI1_INT64, GxB_MIN_FIRSTJ_INT32, GxB_MIN_FIRSTJ_INT64, GxB_MAX_FIRSTJ_INT32, GxB_MAX_FIRSTJ_INT64, GxB_ANY_FIRSTJ_INT32, GxB_ANY_FIRSTJ_INT64, GxB_PLUS_FIRSTJ_INT32, GxB_PLUS_FIRSTJ_INT64, GxB_TIMES_FIRSTJ_INT32, GxB_TIMES_FIRSTJ_INT64, 
GxB_MIN_FIRSTJ1_INT32, GxB_MIN_FIRSTJ1_INT64, GxB_MAX_FIRSTJ1_INT32, GxB_MAX_FIRSTJ1_INT64, GxB_ANY_FIRSTJ1_INT32, GxB_ANY_FIRSTJ1_INT64, GxB_PLUS_FIRSTJ1_INT32, GxB_PLUS_FIRSTJ1_INT64, GxB_TIMES_FIRSTJ1_INT32, GxB_TIMES_FIRSTJ1_INT64, GxB_MIN_SECONDI_INT32, GxB_MIN_SECONDI_INT64, GxB_MAX_SECONDI_INT32, GxB_MAX_SECONDI_INT64, GxB_ANY_SECONDI_INT32, GxB_ANY_SECONDI_INT64, GxB_PLUS_SECONDI_INT32, GxB_PLUS_SECONDI_INT64, GxB_TIMES_SECONDI_INT32, GxB_TIMES_SECONDI_INT64, GxB_MIN_SECONDI1_INT32, GxB_MIN_SECONDI1_INT64, GxB_MAX_SECONDI1_INT32, GxB_MAX_SECONDI1_INT64, GxB_ANY_SECONDI1_INT32, GxB_ANY_SECONDI1_INT64, GxB_PLUS_SECONDI1_INT32, GxB_PLUS_SECONDI1_INT64, GxB_TIMES_SECONDI1_INT32, GxB_TIMES_SECONDI1_INT64, GxB_MIN_SECONDJ_INT32, GxB_MIN_SECONDJ_INT64, GxB_MAX_SECONDJ_INT32, GxB_MAX_SECONDJ_INT64, GxB_ANY_SECONDJ_INT32, GxB_ANY_SECONDJ_INT64, GxB_PLUS_SECONDJ_INT32, GxB_PLUS_SECONDJ_INT64, GxB_TIMES_SECONDJ_INT32, GxB_TIMES_SECONDJ_INT64, GxB_MIN_SECONDJ1_INT32, GxB_MIN_SECONDJ1_INT64, GxB_MAX_SECONDJ1_INT32, GxB_MAX_SECONDJ1_INT64, GxB_ANY_SECONDJ1_INT32, GxB_ANY_SECONDJ1_INT64, GxB_PLUS_SECONDJ1_INT32, GxB_PLUS_SECONDJ1_INT64, GxB_TIMES_SECONDJ1_INT32, GxB_TIMES_SECONDJ1_INT64 ; //------------------------------------------------------------------------------ // GrB_* semirings //------------------------------------------------------------------------------ // The v1.3 C API for GraphBLAS adds the following 124 predefined semirings, // with GrB_* names. They are identical to 124 GxB_* semirings defined above, // with the same name, except that GrB_LXNOR_LOR_SEMIRING_BOOL is identical to // GxB_EQ_LOR_BOOL (since GrB_EQ_BOOL == GrB_LXNOR). The old names are listed // below alongside each new name; the new GrB_* names are preferred. 
// 12 kinds of GrB_* semirings are available for all 10 real non-boolean types: // PLUS_TIMES, PLUS_MIN, // MIN_PLUS, MIN_TIMES, MIN_FIRST, MIN_SECOND, MIN_MAX, // MAX_PLUS, MAX_TIMES, MAX_FIRST, MAX_SECOND, MAX_MIN // and 4 semirings for boolean only: // LOR_LAND, LAND_LOR, LXOR_LAND, LXNOR_LOR. // GxB_* semirings corresponding to the equivalent GrB_* semiring are // historical. GB_PUBLIC GrB_Semiring //-------------------------------------------------------------------------- // 20 semirings with PLUS monoids //-------------------------------------------------------------------------- // PLUS_TIMES semirings for all 10 real, non-boolean types: GrB_PLUS_TIMES_SEMIRING_INT8, // GxB_PLUS_TIMES_INT8 GrB_PLUS_TIMES_SEMIRING_INT16, // GxB_PLUS_TIMES_INT16 GrB_PLUS_TIMES_SEMIRING_INT32, // GxB_PLUS_TIMES_INT32 GrB_PLUS_TIMES_SEMIRING_INT64, // GxB_PLUS_TIMES_INT64 GrB_PLUS_TIMES_SEMIRING_UINT8, // GxB_PLUS_TIMES_UINT8 GrB_PLUS_TIMES_SEMIRING_UINT16, // GxB_PLUS_TIMES_UINT16 GrB_PLUS_TIMES_SEMIRING_UINT32, // GxB_PLUS_TIMES_UINT32 GrB_PLUS_TIMES_SEMIRING_UINT64, // GxB_PLUS_TIMES_UINT64 GrB_PLUS_TIMES_SEMIRING_FP32, // GxB_PLUS_TIMES_FP32 GrB_PLUS_TIMES_SEMIRING_FP64, // GxB_PLUS_TIMES_FP64 // PLUS_MIN semirings for all 10 real, non-boolean types: GrB_PLUS_MIN_SEMIRING_INT8, // GxB_PLUS_MIN_INT8 GrB_PLUS_MIN_SEMIRING_INT16, // GxB_PLUS_MIN_INT16 GrB_PLUS_MIN_SEMIRING_INT32, // GxB_PLUS_MIN_INT32 GrB_PLUS_MIN_SEMIRING_INT64, // GxB_PLUS_MIN_INT64 GrB_PLUS_MIN_SEMIRING_UINT8, // GxB_PLUS_MIN_UINT8 GrB_PLUS_MIN_SEMIRING_UINT16, // GxB_PLUS_MIN_UINT16 GrB_PLUS_MIN_SEMIRING_UINT32, // GxB_PLUS_MIN_UINT32 GrB_PLUS_MIN_SEMIRING_UINT64, // GxB_PLUS_MIN_UINT64 GrB_PLUS_MIN_SEMIRING_FP32, // GxB_PLUS_MIN_FP32 GrB_PLUS_MIN_SEMIRING_FP64, // GxB_PLUS_MIN_FP64 //-------------------------------------------------------------------------- // 50 semirings with MIN monoids //-------------------------------------------------------------------------- // MIN_PLUS semirings for all 10 real, 
non-boolean types: GrB_MIN_PLUS_SEMIRING_INT8, // GxB_MIN_PLUS_INT8 GrB_MIN_PLUS_SEMIRING_INT16, // GxB_MIN_PLUS_INT16 GrB_MIN_PLUS_SEMIRING_INT32, // GxB_MIN_PLUS_INT32 GrB_MIN_PLUS_SEMIRING_INT64, // GxB_MIN_PLUS_INT64 GrB_MIN_PLUS_SEMIRING_UINT8, // GxB_MIN_PLUS_UINT8 GrB_MIN_PLUS_SEMIRING_UINT16, // GxB_MIN_PLUS_UINT16 GrB_MIN_PLUS_SEMIRING_UINT32, // GxB_MIN_PLUS_UINT32 GrB_MIN_PLUS_SEMIRING_UINT64, // GxB_MIN_PLUS_UINT64 GrB_MIN_PLUS_SEMIRING_FP32, // GxB_MIN_PLUS_FP32 GrB_MIN_PLUS_SEMIRING_FP64, // GxB_MIN_PLUS_FP64 // MIN_TIMES semirings for all 10 real, non-boolean types: GrB_MIN_TIMES_SEMIRING_INT8, // GxB_MIN_TIMES_INT8 GrB_MIN_TIMES_SEMIRING_INT16, // GxB_MIN_TIMES_INT16 GrB_MIN_TIMES_SEMIRING_INT32, // GxB_MIN_TIMES_INT32 GrB_MIN_TIMES_SEMIRING_INT64, // GxB_MIN_TIMES_INT64 GrB_MIN_TIMES_SEMIRING_UINT8, // GxB_MIN_TIMES_UINT8 GrB_MIN_TIMES_SEMIRING_UINT16, // GxB_MIN_TIMES_UINT16 GrB_MIN_TIMES_SEMIRING_UINT32, // GxB_MIN_TIMES_UINT32 GrB_MIN_TIMES_SEMIRING_UINT64, // GxB_MIN_TIMES_UINT64 GrB_MIN_TIMES_SEMIRING_FP32, // GxB_MIN_TIMES_FP32 GrB_MIN_TIMES_SEMIRING_FP64, // GxB_MIN_TIMES_FP64 // MIN_FIRST semirings for all 10 real, non-boolean types: GrB_MIN_FIRST_SEMIRING_INT8, // GxB_MIN_FIRST_INT8 GrB_MIN_FIRST_SEMIRING_INT16, // GxB_MIN_FIRST_INT16 GrB_MIN_FIRST_SEMIRING_INT32, // GxB_MIN_FIRST_INT32 GrB_MIN_FIRST_SEMIRING_INT64, // GxB_MIN_FIRST_INT64 GrB_MIN_FIRST_SEMIRING_UINT8, // GxB_MIN_FIRST_UINT8 GrB_MIN_FIRST_SEMIRING_UINT16, // GxB_MIN_FIRST_UINT16 GrB_MIN_FIRST_SEMIRING_UINT32, // GxB_MIN_FIRST_UINT32 GrB_MIN_FIRST_SEMIRING_UINT64, // GxB_MIN_FIRST_UINT64 GrB_MIN_FIRST_SEMIRING_FP32, // GxB_MIN_FIRST_FP32 GrB_MIN_FIRST_SEMIRING_FP64, // GxB_MIN_FIRST_FP64 // MIN_SECOND semirings for all 10 real, non-boolean types: GrB_MIN_SECOND_SEMIRING_INT8, // GxB_MIN_SECOND_INT8 GrB_MIN_SECOND_SEMIRING_INT16, // GxB_MIN_SECOND_INT16 GrB_MIN_SECOND_SEMIRING_INT32, // GxB_MIN_SECOND_INT32 GrB_MIN_SECOND_SEMIRING_INT64, // GxB_MIN_SECOND_INT64 
GrB_MIN_SECOND_SEMIRING_UINT8, // GxB_MIN_SECOND_UINT8 GrB_MIN_SECOND_SEMIRING_UINT16, // GxB_MIN_SECOND_UINT16 GrB_MIN_SECOND_SEMIRING_UINT32, // GxB_MIN_SECOND_UINT32 GrB_MIN_SECOND_SEMIRING_UINT64, // GxB_MIN_SECOND_UINT64 GrB_MIN_SECOND_SEMIRING_FP32, // GxB_MIN_SECOND_FP32 GrB_MIN_SECOND_SEMIRING_FP64, // GxB_MIN_SECOND_FP64 // MIN_MAX semirings for all 10 real, non-boolean types: GrB_MIN_MAX_SEMIRING_INT8, // GxB_MIN_MAX_INT8 GrB_MIN_MAX_SEMIRING_INT16, // GxB_MIN_MAX_INT16 GrB_MIN_MAX_SEMIRING_INT32, // GxB_MIN_MAX_INT32 GrB_MIN_MAX_SEMIRING_INT64, // GxB_MIN_MAX_INT64 GrB_MIN_MAX_SEMIRING_UINT8, // GxB_MIN_MAX_UINT8 GrB_MIN_MAX_SEMIRING_UINT16, // GxB_MIN_MAX_UINT16 GrB_MIN_MAX_SEMIRING_UINT32, // GxB_MIN_MAX_UINT32 GrB_MIN_MAX_SEMIRING_UINT64, // GxB_MIN_MAX_UINT64 GrB_MIN_MAX_SEMIRING_FP32, // GxB_MIN_MAX_FP32 GrB_MIN_MAX_SEMIRING_FP64, // GxB_MIN_MAX_FP64 //-------------------------------------------------------------------------- // 50 semirings with MAX monoids //-------------------------------------------------------------------------- // MAX_PLUS semirings for all 10 real, non-boolean types GrB_MAX_PLUS_SEMIRING_INT8, // GxB_MAX_PLUS_INT8 GrB_MAX_PLUS_SEMIRING_INT16, // GxB_MAX_PLUS_INT16 GrB_MAX_PLUS_SEMIRING_INT32, // GxB_MAX_PLUS_INT32 GrB_MAX_PLUS_SEMIRING_INT64, // GxB_MAX_PLUS_INT64 GrB_MAX_PLUS_SEMIRING_UINT8, // GxB_MAX_PLUS_UINT8 GrB_MAX_PLUS_SEMIRING_UINT16, // GxB_MAX_PLUS_UINT16 GrB_MAX_PLUS_SEMIRING_UINT32, // GxB_MAX_PLUS_UINT32 GrB_MAX_PLUS_SEMIRING_UINT64, // GxB_MAX_PLUS_UINT64 GrB_MAX_PLUS_SEMIRING_FP32, // GxB_MAX_PLUS_FP32 GrB_MAX_PLUS_SEMIRING_FP64, // GxB_MAX_PLUS_FP64 // MAX_TIMES semirings for all 10 real, non-boolean types: GrB_MAX_TIMES_SEMIRING_INT8, // GxB_MAX_TIMES_INT8 GrB_MAX_TIMES_SEMIRING_INT16, // GxB_MAX_TIMES_INT16 GrB_MAX_TIMES_SEMIRING_INT32, // GxB_MAX_TIMES_INT32 GrB_MAX_TIMES_SEMIRING_INT64, // GxB_MAX_TIMES_INT64 GrB_MAX_TIMES_SEMIRING_UINT8, // GxB_MAX_TIMES_UINT8 GrB_MAX_TIMES_SEMIRING_UINT16, // 
GxB_MAX_TIMES_UINT16 GrB_MAX_TIMES_SEMIRING_UINT32, // GxB_MAX_TIMES_UINT32 GrB_MAX_TIMES_SEMIRING_UINT64, // GxB_MAX_TIMES_UINT64 GrB_MAX_TIMES_SEMIRING_FP32, // GxB_MAX_TIMES_FP32 GrB_MAX_TIMES_SEMIRING_FP64, // GxB_MAX_TIMES_FP64 // MAX_FIRST semirings for all 10 real, non-boolean types: GrB_MAX_FIRST_SEMIRING_INT8, // GxB_MAX_FIRST_INT8 GrB_MAX_FIRST_SEMIRING_INT16, // GxB_MAX_FIRST_INT16 GrB_MAX_FIRST_SEMIRING_INT32, // GxB_MAX_FIRST_INT32 GrB_MAX_FIRST_SEMIRING_INT64, // GxB_MAX_FIRST_INT64 GrB_MAX_FIRST_SEMIRING_UINT8, // GxB_MAX_FIRST_UINT8 GrB_MAX_FIRST_SEMIRING_UINT16, // GxB_MAX_FIRST_UINT16 GrB_MAX_FIRST_SEMIRING_UINT32, // GxB_MAX_FIRST_UINT32 GrB_MAX_FIRST_SEMIRING_UINT64, // GxB_MAX_FIRST_UINT64 GrB_MAX_FIRST_SEMIRING_FP32, // GxB_MAX_FIRST_FP32 GrB_MAX_FIRST_SEMIRING_FP64, // GxB_MAX_FIRST_FP64 // MAX_SECOND semirings for all 10 real, non-boolean types: GrB_MAX_SECOND_SEMIRING_INT8, // GxB_MAX_SECOND_INT8 GrB_MAX_SECOND_SEMIRING_INT16, // GxB_MAX_SECOND_INT16 GrB_MAX_SECOND_SEMIRING_INT32, // GxB_MAX_SECOND_INT32 GrB_MAX_SECOND_SEMIRING_INT64, // GxB_MAX_SECOND_INT64 GrB_MAX_SECOND_SEMIRING_UINT8, // GxB_MAX_SECOND_UINT8 GrB_MAX_SECOND_SEMIRING_UINT16, // GxB_MAX_SECOND_UINT16 GrB_MAX_SECOND_SEMIRING_UINT32, // GxB_MAX_SECOND_UINT32 GrB_MAX_SECOND_SEMIRING_UINT64, // GxB_MAX_SECOND_UINT64 GrB_MAX_SECOND_SEMIRING_FP32, // GxB_MAX_SECOND_FP32 GrB_MAX_SECOND_SEMIRING_FP64, // GxB_MAX_SECOND_FP64 // MAX_MIN semirings for all 10 real, non-boolean types: GrB_MAX_MIN_SEMIRING_INT8, // GxB_MAX_MIN_INT8 GrB_MAX_MIN_SEMIRING_INT16, // GxB_MAX_MIN_INT16 GrB_MAX_MIN_SEMIRING_INT32, // GxB_MAX_MIN_INT32 GrB_MAX_MIN_SEMIRING_INT64, // GxB_MAX_MIN_INT64 GrB_MAX_MIN_SEMIRING_UINT8, // GxB_MAX_MIN_UINT8 GrB_MAX_MIN_SEMIRING_UINT16, // GxB_MAX_MIN_UINT16 GrB_MAX_MIN_SEMIRING_UINT32, // GxB_MAX_MIN_UINT32 GrB_MAX_MIN_SEMIRING_UINT64, // GxB_MAX_MIN_UINT64 GrB_MAX_MIN_SEMIRING_FP32, // GxB_MAX_MIN_FP32 GrB_MAX_MIN_SEMIRING_FP64, // GxB_MAX_MIN_FP64 
//--------------------------------------------------------------------------
// 4 boolean semirings:
//--------------------------------------------------------------------------

GrB_LOR_LAND_SEMIRING_BOOL,     // GxB_LOR_LAND_BOOL
GrB_LAND_LOR_SEMIRING_BOOL,     // GxB_LAND_LOR_BOOL
GrB_LXOR_LAND_SEMIRING_BOOL,    // GxB_LXOR_LAND_BOOL
GrB_LXNOR_LOR_SEMIRING_BOOL ;   // GxB_EQ_LOR_BOOL (note EQ == LXNOR)

//==============================================================================
// GrB_*_resize: change the size of a matrix or vector
//==============================================================================

// If the dimensions decrease, entries that fall outside the resized matrix or
// vector are deleted.

GB_PUBLIC
GrB_Info GrB_Matrix_resize      // change the size of a matrix
(
    GrB_Matrix C,               // matrix to modify
    GrB_Index nrows_new,        // new number of rows in matrix
    GrB_Index ncols_new         // new number of columns in matrix
) ;

GB_PUBLIC
GrB_Info GrB_Vector_resize      // change the size of a vector
(
    GrB_Vector w,               // vector to modify
    GrB_Index nrows_new         // new number of rows in vector
) ;

// GxB_*_resize are identical to the GrB_*resize methods above

GB_PUBLIC
GrB_Info GxB_Matrix_resize      // change the size of a matrix (historical)
(
    GrB_Matrix C,               // matrix to modify
    GrB_Index nrows_new,        // new number of rows in matrix
    GrB_Index ncols_new         // new number of columns in matrix
) ;

GB_PUBLIC
GrB_Info GxB_Vector_resize      // change the size of a vector (historical)
(
    GrB_Vector w,               // vector to modify
    GrB_Index nrows_new         // new number of rows in vector
) ;

// GxB_resize is a generic function for resizing a matrix or vector:

//      GrB_Vector_resize (u,nrows_new)
//      GrB_Matrix_resize (A,nrows_new,ncols_new)

#if GxB_STDC_VERSION >= 201112L
#define GxB_resize(arg1,...)                                \
    _Generic                                                \
    (                                                       \
        (arg1),                                             \
        GrB_Vector : GrB_Vector_resize ,                    \
        GrB_Matrix : GrB_Matrix_resize                      \
    )                                                       \
    (arg1, __VA_ARGS__)
#endif

//==============================================================================
// GxB_fprint and GxB_print: print the contents of a GraphBLAS object
//==============================================================================

// GxB_fprint (object, GxB_Print_Level pr, FILE *f) prints the contents of any
// of the 9 GraphBLAS objects to the file f, and also does an extensive test on
// the object to determine if it is valid.  It returns one of the following
// error conditions:
//
//      GrB_SUCCESS              object is valid
//      GrB_UNINITIALIZED_OBJECT object is not initialized
//      GrB_INVALID_OBJECT       object is not valid
//      GrB_NULL_POINTER         object is a NULL pointer
//      GrB_INVALID_VALUE        fprintf returned an I/O error; see the ANSI C
//                               errno or GrB_error( ) for details.
//
// GxB_fprint does not modify the status of any object.  If a matrix or vector
// has not been completed, the pending computations are guaranteed to *not* be
// performed by GxB_fprint.  The reason is simple.  It is possible for a bug in
// the user application (such as accessing memory outside the bounds of an
// array) to mangle the internal content of a GraphBLAS object, and GxB_fprint
// can be a helpful tool to track down this bug.  If GxB_fprint attempted to
// complete any computations prior to printing or checking the contents of the
// matrix or vector, then further errors could occur, including a segfault.
//
// The type-specific functions include an additional argument, the name string.
// The name is printed at the beginning of the display (assuming pr is not
// GxB_SILENT) so that the object can be more easily identified in the output.
// For the type-generic methods GxB_fprint and GxB_print, the name string is
// the variable name of the object itself.
//
// If f is NULL, stdout is used; this is not an error condition. 
// If pr is
// outside the bounds 0 to 3, negative values are treated as GxB_SILENT, and
// values > 3 are treated as GxB_COMPLETE.  If name is NULL, it is treated as
// the empty string.
//
// GxB_print (object, GxB_Print_Level pr) is the same as GxB_fprint, except
// that it prints the contents with printf instead of fprintf to a file f.
//
// The exact content and format of what is printed is implementation-dependent,
// and will change from version to version of SuiteSparse:GraphBLAS.  Do not
// attempt to rely on the exact content or format by trying to parse the
// resulting output via another program.  The intent of these functions is to
// produce a report of the object for visual inspection.

typedef enum
{
    GxB_SILENT = 0,     // nothing is printed, just check the object
    GxB_SUMMARY = 1,    // print a terse summary
    GxB_SHORT = 2,      // short description, about 30 entries of a matrix
    GxB_COMPLETE = 3,   // print the entire contents of the object
    GxB_SHORT_VERBOSE = 4,    // GxB_SHORT but with "%.15g" for doubles
    GxB_COMPLETE_VERBOSE = 5  // GxB_COMPLETE but with "%.15g" for doubles
}
GxB_Print_Level ;

GB_PUBLIC
GrB_Info GxB_Type_fprint            // print and check a GrB_Type
(
    GrB_Type type,                  // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_UnaryOp_fprint         // print and check a GrB_UnaryOp
(
    GrB_UnaryOp unaryop,            // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_BinaryOp_fprint        // print and check a GrB_BinaryOp
(
    GrB_BinaryOp binaryop,          // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_fprint    // print and check a GrB_IndexUnaryOp
(
    GrB_IndexUnaryOp op,            // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_SelectOp_fprint        // print and check a GxB_SelectOp
(
    GxB_SelectOp selectop,          // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_Monoid_fprint          // print and check a GrB_Monoid
(
    GrB_Monoid monoid,              // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_Semiring_fprint        // print and check a GrB_Semiring
(
    GrB_Semiring semiring,          // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_Descriptor_fprint      // print and check a GrB_Descriptor
(
    GrB_Descriptor descriptor,      // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_fprint          // print and check a GrB_Matrix
(
    GrB_Matrix A,                   // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_Vector_fprint          // print and check a GrB_Vector
(
    GrB_Vector v,                   // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

GB_PUBLIC
GrB_Info GxB_Scalar_fprint          // print and check a GrB_Scalar
(
    GrB_Scalar s,                   // object to print and check
    const char *name,               // name of the object
    GxB_Print_Level pr,             // print level
    FILE *f                         // file for output
) ;

#if GxB_STDC_VERSION >= 201112L
#define GxB_fprint(object,pr,f)                                 \
    _Generic                                                    \
    (                                                           \
        (object),                                               \
        const GrB_Type         : GxB_Type_fprint         ,      \
              GrB_Type         : GxB_Type_fprint         ,      \
        const GrB_UnaryOp      : GxB_UnaryOp_fprint      ,      \
              GrB_UnaryOp      : GxB_UnaryOp_fprint      ,      \
        const GrB_BinaryOp     : GxB_BinaryOp_fprint     ,      \
              GrB_BinaryOp     : GxB_BinaryOp_fprint     ,      \
        const GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint ,      \
              GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint ,      \
        const GxB_SelectOp     : GxB_SelectOp_fprint     ,      \
              GxB_SelectOp     : GxB_SelectOp_fprint     ,      \
        const GrB_Monoid       : GxB_Monoid_fprint       ,      \
              GrB_Monoid       : GxB_Monoid_fprint       ,      \
        const GrB_Semiring     : GxB_Semiring_fprint     ,      \
              GrB_Semiring     : GxB_Semiring_fprint     ,      \
        const GrB_Scalar       : GxB_Scalar_fprint       ,      \
              GrB_Scalar       : GxB_Scalar_fprint       ,      \
        const GrB_Vector       : GxB_Vector_fprint       ,      \
              GrB_Vector       : GxB_Vector_fprint       ,      \
        const GrB_Matrix       : GxB_Matrix_fprint       ,      \
              GrB_Matrix       : GxB_Matrix_fprint       ,      \
        const GrB_Descriptor   : GxB_Descriptor_fprint   ,      \
              GrB_Descriptor   : GxB_Descriptor_fprint          \
    )                                                           \
    (object, GB_STR(object), pr, f)

#define GxB_print(object,pr) GxB_fprint(object,pr,NULL)
#endif

//==============================================================================
// Matrix and vector import/export/pack/unpack
//==============================================================================

// The import/export/pack/unpack functions allow the user application to create
// a GrB_Matrix or GrB_Vector object, and to extract its contents, faster and
// with less memory overhead than the GrB_*_build and GrB_*_extractTuples
// functions.

// The semantics of import/export/pack/unpack are the same as the "move
// constructor" in C++.  On import, the user provides a set of arrays that have
// been previously allocated via the ANSI C malloc function.  The arrays define
// the content of the matrix or vector.  Unlike GrB_*_build, the GraphBLAS
// library then takes ownership of the user's input arrays and may either (a)
// incorporate them into its internal data structure for the new GrB_Matrix or
// GrB_Vector, potentially creating the GrB_Matrix or GrB_Vector in constant
// time with no memory copying performed, or (b) if the library does not
// support the import format directly, then it may convert the input to its
// internal format, and then free the user's input arrays.  GraphBLAS may also
// choose to use a mix of the two strategies. 
// In either case, the input arrays
// are no longer "owned" by the user application.  If A is a GrB_Matrix created
// by an import/pack, the user input arrays are freed no later than GrB_free
// (&A), and may be freed earlier, at the discretion of the GraphBLAS library.
// The data structure of the GrB_Matrix and GrB_Vector remain opaque.

// The export/unpack of a GrB_Matrix or GrB_Vector is symmetric with the import
// operation.  The export is destructive, where the GrB_Matrix or GrB_Vector no
// longer exists when the export completes.  The GrB_Matrix or GrB_Vector
// exists after an unpack operation, just with no entries.  In both export and
// unpack, the user is returned several arrays that contain the matrix or
// vector in the requested format.  Ownership of these arrays is given to the
// user application, which is then responsible for freeing them via the ANSI C
// free function.  If the output format is supported by the GraphBLAS library,
// then these arrays may be returned to the user application in O(1) time and
// with no memory copying performed.  Otherwise, the GraphBLAS library will
// create the output arrays for the user (via the ANSI C malloc function), fill
// them with the GrB_Matrix or GrB_Vector data, and then return the newly
// allocated arrays to the user.

// Eight different formats are provided for import/export.  For each format,
// the Ax array has a C-type <type> corresponding to one of the 13 built-in
// types in GraphBLAS (bool, int*_t, uint*_t, float, double, float complex, or
// double complex), or a user-defined type.

// On import/pack, the required user arrays Ah, Ap, Ab, Ai, Aj, and/or Ax must
// be non-NULL pointers to memory space allocated by the ANSI C malloc (or
// calloc, or realloc), unless nzmax is zero (in which case the Ab, Ai, Aj, Ax,
// vb, vi, and vx arrays may all be NULL).  For the import, A (or GrB_Vector v)
// is undefined on input, just like GrB_*_new.
// If the import
// is successful, the GrB_Matrix A or GrB_Vector v is created, and the pointers
// to the user input arrays have been set to NULL.  These user arrays have
// either been incorporated directly into the GrB_Matrix A or GrB_Vector v, in
// which case the user input arrays will eventually be freed by GrB_free (&A),
// or their contents have been copied and the arrays freed.  This decision is
// made by the GraphBLAS library itself, and the user application has no
// control over this decision.

// If any of the arrays Ab, Aj, Ai, Ax, vb, vi, or vx have zero size (with
// nzmax of zero), they are allowed to be NULL pointers on input.

// A matrix or vector may be "iso", where all entries present in the pattern
// have the same value.  In this case, the boolean iso flag is true, and the
// corresponding numerical array (Ax for matrices, vx for vectors, below) need
// be only large enough to hold a single value.

// No error checking is performed on the content of the user input arrays.  If
// the user input arrays do not conform to the precise specifications above,
// results are undefined.  No typecasting of the values of the matrix or vector
// entries is performed on import or export.

// SuiteSparse:GraphBLAS supports all eight formats natively (CSR, CSC,
// HyperCSR, and HyperCSC, BitmapR, BitmapC, FullR, FullC).  For vectors, only
// CSC, BitmapC, and FullC formats are used.  On import, all eight formats
// take O(1) time and memory to import.  On export, if the GrB_Matrix or
// GrB_Vector is already in this particular format, then the export takes O(1)
// time and no memory copying is performed.

// If the import is not successful, the GxB_Matrix_import_* functions return A
// as NULL, GxB_Vector_import returns v as NULL, and the user input arrays are
// neither modified nor freed.  They are still owned by the user application.

// If the input data is untrusted, use the following descriptor setting for
// GxB_Matrix_import* and GxB_Matrix_pack*.
// The import/pack will be slower,
// but secure.  GrB_Matrix_import uses the slow, secure method, since it has
// no descriptor input.
//
//      GxB_set (desc, GxB_IMPORT, GxB_SECURE_IMPORT) ;

// As of v5.2.0, GxB_*import* and GxB_*export* are declared historical.  Use
// GxB_*pack* and GxB_*unpack* instead.  The GxB import/export will be kept
// but only documented here, not in the User Guide.

//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSR: pack a CSR matrix
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GxB_Matrix_import_CSR      // historical: use GxB_Matrix_pack_CSR
(
    GrB_Matrix *A,      // handle of matrix to create
    GrB_Type type,      // type of matrix to create
    GrB_Index nrows,    // number of rows of the matrix
    GrB_Index ncols,    // number of columns of the matrix
    GrB_Index **Ap,     // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
    GrB_Index **Aj,     // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A) * (type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Aj_size,  // size of Aj in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    bool jumbled,       // if true, indices in each row may be unsorted
    const GrB_Descriptor desc
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSR        // pack a CSR matrix
(
    GrB_Matrix A,       // matrix to create (type, nrows, ncols unchanged)
    GrB_Index **Ap,     // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
    GrB_Index **Aj,     // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A) * (type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Aj_size,  // size of Aj in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    bool jumbled,       // if true, indices in each row may be unsorted
    const GrB_Descriptor desc
) ;

    // CSR: an nrows-by-ncols matrix with nvals entries in CSR format consists
    // of 3 arrays, where nvals = Ap [nrows]:
    //
    //          GrB_Index Ap [nrows+1], Aj [nvals] ; <type> Ax [nvals] ;
    //
    // The column indices of entries in the ith row of the matrix are held
    // in Aj [Ap [i] ... Ap[i+1]], and the corresponding values are held
    // in the same positions in Ax.  Column indices must be in the range 0
    // to ncols-1.  If jumbled is false, the column indices must appear in
    // sorted order within each row.  No duplicate column indices may
    // appear in any row.  Ap [0] must equal zero, and Ap [nrows] must
    // equal nvals.  The Ap array must be of size nrows+1 (or larger), and
    // the Aj and Ax arrays must have size at least nvals.  If nvals is
    // zero, then the Aj and Ax arrays need not be present and can be
    // NULL.

//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSC: pack a CSC matrix
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GxB_Matrix_import_CSC      // historical: use GxB_Matrix_pack_CSC
(
    GrB_Matrix *A,      // handle of matrix to create
    GrB_Type type,      // type of matrix to create
    GrB_Index nrows,    // number of rows of the matrix
    GrB_Index ncols,    // number of columns of the matrix
    GrB_Index **Ap,     // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
    GrB_Index **Ai,     // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A) * (type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Ai_size,  // size of Ai in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    bool jumbled,       // if true, indices in each column may be unsorted
    const GrB_Descriptor desc
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSC        // pack a CSC matrix
(
    GrB_Matrix A,       // matrix to create (type, nrows, ncols unchanged)
    GrB_Index **Ap,     // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
    GrB_Index **Ai,     // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A) * (type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Ai_size,  // size of Ai in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    bool jumbled,       // if true, indices in each column may be unsorted
    const GrB_Descriptor desc
) ;

    // CSC: an nrows-by-ncols matrix with nvals entries in CSC format consists
    // of 3 arrays, where nvals = Ap [ncols]:
    //
    //          GrB_Index Ap [ncols+1], Ai [nvals] ; <type> Ax [nvals] ;
    //
    // The row indices of entries in the jth column of the matrix are held
    // in Ai [Ap [j] ... Ap[j+1]], and the corresponding values are held
    // in the same positions in Ax.  Row indices must be in the range 0 to
    // nrows-1.  If jumbled is false, the row indices must appear in
    // sorted order within each column.  No duplicate row indices may
    // appear in any column.  Ap [0] must equal zero, and Ap [ncols] must
    // equal nvals.  The Ap array must be of size ncols+1 (or larger), and
    // the Ai and Ax arrays must have size at least nvals.  If nvals is
    // zero, then the Ai and Ax arrays need not be present and can be
    // NULL. 
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSR: pack a hypersparse CSR matrix
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSR // historical: use GxB_Matrix_pack_HyperCSR
(
    GrB_Matrix *A,      // handle of matrix to create
    GrB_Type type,      // type of matrix to create
    GrB_Index nrows,    // number of rows of the matrix
    GrB_Index ncols,    // number of columns of the matrix
    GrB_Index **Ap,     // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
    GrB_Index **Ah,     // row indices, Ah_size >= nvec*sizeof(int64_t)
    GrB_Index **Aj,     // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A) * (type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Ah_size,  // size of Ah in bytes
    GrB_Index Aj_size,  // size of Aj in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    GrB_Index nvec,     // number of rows that appear in Ah
    bool jumbled,       // if true, indices in each row may be unsorted
    const GrB_Descriptor desc
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSR   // pack a hypersparse CSR matrix
(
    GrB_Matrix A,       // matrix to create (type, nrows, ncols unchanged)
    GrB_Index **Ap,     // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
    GrB_Index **Ah,     // row indices, Ah_size >= nvec*sizeof(int64_t)
    GrB_Index **Aj,     // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A) * (type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Ah_size,  // size of Ah in bytes
    GrB_Index Aj_size,  // size of Aj in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    GrB_Index nvec,     // number of rows that appear in Ah
    bool jumbled,       // if true, indices in each row may be unsorted
    const GrB_Descriptor desc
) ;

    // HyperCSR: an nrows-by-ncols matrix with nvals entries and nvec
    // rows that may have entries in HyperCSR format consists of 4 arrays,
    // where nvals = Ap [nvec]:
    //
    //          GrB_Index Ah [nvec], Ap [nvec+1], Aj [nvals] ;
    //          <type> Ax [nvals] ;
    //
    // The Aj and Ax arrays are the same for a matrix in CSR or HyperCSR
    // format.  Only Ap and Ah differ.
    //
    // The Ah array is a list of the row indices of rows that appear in
    // the matrix.  It
    // must appear in sorted order, and no duplicates may appear.  If i =
    // Ah [k] is the kth row, then the column indices of the ith
    // row appear in Aj [Ap [k] ... Ap [k+1]], and the corresponding
    // values appear in the same locations in Ax.  Column indices must be
    // in the range 0 to ncols-1, and must appear in sorted order within
    // each row.  No duplicate column indices may appear in any row.  nvec
    // may be zero, to denote an array with no entries.  The Ah array must
    // be of size at least nvec, Ap must be of size at least nvec+1, and
    // Aj and Ax must be at least of size nvals.  If nvals is zero, then
    // the Aj and Ax arrays need not be present and can be NULL. 
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSC: pack a hypersparse CSC matrix
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSC // historical: use GxB_Matrix_pack_HyperCSC
(
    GrB_Matrix *A,      // handle of matrix to create
    GrB_Type type,      // type of matrix to create
    GrB_Index nrows,    // number of rows of the matrix
    GrB_Index ncols,    // number of columns of the matrix
    GrB_Index **Ap,     // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
    GrB_Index **Ah,     // column indices, Ah_size >= nvec*sizeof(int64_t)
    GrB_Index **Ai,     // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A)*(type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Ah_size,  // size of Ah in bytes
    GrB_Index Ai_size,  // size of Ai in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    GrB_Index nvec,     // number of columns that appear in Ah
    bool jumbled,       // if true, indices in each column may be unsorted
    const GrB_Descriptor desc
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSC   // pack a hypersparse CSC matrix
(
    GrB_Matrix A,       // matrix to create (type, nrows, ncols unchanged)
    GrB_Index **Ap,     // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
    GrB_Index **Ah,     // column indices, Ah_size >= nvec*sizeof(int64_t)
    GrB_Index **Ai,     // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
    void **Ax,          // values, Ax_size >= nvals(A)*(type size)
                        // or Ax_size >= (type size), if iso is true
    GrB_Index Ap_size,  // size of Ap in bytes
    GrB_Index Ah_size,  // size of Ah in bytes
    GrB_Index Ai_size,  // size of Ai in bytes
    GrB_Index Ax_size,  // size of Ax in bytes
    bool iso,           // if true, A is iso
    GrB_Index nvec,     // number of columns that appear in Ah
    bool jumbled,       // if true, indices in each column may be unsorted
    const GrB_Descriptor desc
) ;

    // HyperCSC: an nrows-by-ncols matrix with nvals entries and nvec
    // columns that may have entries in HyperCSC format consists of 4 arrays,
    // where nvals = Ap [nvec]:
    //
    //          GrB_Index Ah [nvec], Ap [nvec+1], Ai [nvals] ;
    //          <type> Ax [nvals] ;
    //
    // The Ai and Ax arrays are the same for a matrix in CSC or HyperCSC
    // format.  Only Ap and Ah differ.
    //
    // The Ah array is a list of the column indices of non-empty columns.
    // It must appear in sorted order, and no duplicates may appear.  If j
    // = Ah [k] is the kth non-empty column, then the row indices of the
    // jth column appear in Ai [Ap [k] ... Ap [k+1]], and the
    // corresponding values appear in the same locations in Ax.  Row
    // indices must be in the range 0 to nrows-1, and must appear in
    // sorted order within each column.  No duplicate row indices may
    // appear in any column.  nvec may be zero, to denote an array with no
    // entries.  The Ah array must be of size at least nvec, Ap must be of
    // size at least nvec+1, and Ai and Ax must be at least of size nvals.
    // If nvals is zero, then the Ai and Ax arrays need not be present and
    // can be NULL. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_BitmapR: pack a bitmap matrix, held by row //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_BitmapR // historical: use GxB_Matrix_pack_BitmapR ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_BitmapR // pack a bitmap matrix, held by row ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; // BitmapR: a dense format, but able to represent sparsity structure of A. // // int8_t Ab [nrows*ncols] ; // <type> Ax [nrows*ncols] ; // // Ab and Ax are both of size nrows*ncols. Ab [i*ncols+j] = 1 if the // A(i,j) entry is present with value Ax [i*ncols+j], or 0 if A(i,j) // is not present. nvals must equal the number of 1's in the Ab // array. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_BitmapC: pack a bitmap matrix, held by column //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_BitmapC // historical: use GxB_Matrix_pack_BitmapC ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_BitmapC // pack a bitmap matrix, held by column ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; // BitmapC: a dense format, but able to represent sparsity structure of A. // // int8_t Ab [nrows*ncols] ; // <type> Ax [nrows*ncols] ; // // Ab and Ax are both of size nrows*ncols. Ab [i+j*nrows] = 1 if the // A(i,j) entry is present with value Ax [i+j*nrows], or 0 if A(i,j) // is not present. nvals must equal the number of 1's in the Ab // array. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_FullR: pack a full matrix, held by row //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_FullR // historical: use GxB_Matrix_pack_FullR ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_FullR // pack a full matrix, held by row ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; // FullR: an nrows-by-ncols full matrix held in row-major order: // // <type> Ax [nrows*ncols] ; // // Ax is an array of size nrows*ncols, where A(i,j) is held in // Ax [i*ncols+j]. All entries in A are present. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_FullC: pack a full matrix, held by column //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_FullC // historical: use GxB_Matrix_pack_FullC ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_FullC // pack a full matrix, held by column ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; // FullC: an nrows-by-ncols full matrix held in column-major order: // // <type> Ax [nrows*ncols] ; // // Ax is an array of size nrows*ncols, where A(i,j) is held in // Ax [i+j*nrows]. All entries in A are present. 
//------------------------------------------------------------------------------ // GxB_Vector_pack_CSC: import/pack a vector in CSC format //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Vector_import_CSC // historical: use GxB_Vector_pack_CSC ( GrB_Vector *v, // handle of vector to create GrB_Type type, // type of vector to create GrB_Index n, // vector length GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t) void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vi_size, // size of vi in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in vector bool jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_pack_CSC // pack a vector in CSC format ( GrB_Vector v, // vector to create (type and length unchanged) GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t) void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vi_size, // size of vi in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in vector bool jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; // The GrB_Vector is treated as if it was a single column of an n-by-1 // matrix in CSC format, except that no vp array is required. If nvals is // zero, then the vi and vx arrays need not be present and can be NULL. 
//------------------------------------------------------------------------------ // GxB_Vector_pack_Bitmap: pack a vector in bitmap format //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Vector_import_Bitmap // historical: GxB_Vector_pack_Bitmap ( GrB_Vector *v, // handle of vector to create GrB_Type type, // type of vector to create GrB_Index n, // vector length int8_t **vb, // bitmap, vb_size >= n void **vx, // values, vx_size >= n * (type size) // or vx_size >= (type size), if iso is true GrB_Index vb_size, // size of vb in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_pack_Bitmap // pack a bitmap vector ( GrB_Vector v, // vector to create (type and length unchanged) int8_t **vb, // bitmap, vb_size >= n void **vx, // values, vx_size >= n * (type size) // or vx_size >= (type size), if iso is true GrB_Index vb_size, // size of vb in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; // The GrB_Vector is treated as if it was a single column of an n-by-1 // matrix in BitmapC format. 
//------------------------------------------------------------------------------ // GxB_Vector_pack_Full: pack a vector in full format //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Vector_import_Full // historical: use GxB_Vector_pack_Full ( GrB_Vector *v, // handle of vector to create GrB_Type type, // type of vector to create GrB_Index n, // vector length void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_pack_Full // pack a full vector ( GrB_Vector v, // vector to create (type and length unchanged) void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso const GrB_Descriptor desc ) ; // The GrB_Vector is treated as if it was a single column of an n-by-1 // matrix in FullC format. //------------------------------------------------------------------------------ // GxB* export/unpack //------------------------------------------------------------------------------ // The GxB_*_export/unpack functions are symmetric with the GxB_*_import/pack // functions. The export/unpack functions force completion of any pending // operations, prior to the export, except if the only pending operation is to // unjumble the matrix. // // If there are no entries in the matrix or vector, then the index arrays (Ai, // Aj, or vi) and value arrays (Ax or vx) are returned as NULL. This is not an // error condition. // // A GrB_Matrix may be exported/unpacked in any one of four different formats. // On successful export, the input GrB_Matrix A is freed, and the output arrays // Ah, Ap, Ai, Aj, and/or Ax are returned to the user application as arrays // allocated by the ANSI C malloc function. 
// The four formats are the same as
// the import formats for GxB_Matrix_import/pack.
//
// If jumbled is NULL on input, this indicates to GxB_*export/unpack* that the
// exported/unpacked matrix cannot be returned in a jumbled format.  In this
// case, if the matrix is jumbled, it is sorted before exporting it to the
// caller.
//
// If iso is NULL on input, this indicates to the export/unpack methods that
// the exported/unpacked matrix cannot be returned in an iso format, with an Ax
// array with just one entry.  In this case, if the matrix is iso, it is
// expanded before exporting/unpacking it to the caller.
//
// For the export/unpack*Full* methods, all entries in the matrix or vector
// must be present.  That is, GrB_*_nvals must report nvals equal to
// nrows*ncols for a matrix.  If this condition does not hold, the
// matrix/vector is not exported, and GrB_INVALID_VALUE is returned.
//
// If the export/unpack is not successful, the export/unpack functions do not
// modify the matrix or vector and the user arrays are returned as NULL.
GB_PUBLIC GrB_Info GxB_Matrix_export_CSR // historical: use GxB_Matrix_unpack_CSR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // row "pointers" GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_CSR // unpack a CSR matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // row "pointers" GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_CSC // historical: use GxB_Matrix_unpack_CSC ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // column "pointers" GrB_Index **Ai, // row indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_CSC // unpack a CSC matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // column "pointers" GrB_Index **Ai, // row indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes 
GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_HyperCSR // historical: use GxB_Matrix_unpack_HyperCSR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // row "pointers" GrB_Index **Ah, // row indices GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of rows that appear in Ah bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_HyperCSR // unpack a hypersparse CSR matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // row "pointers" GrB_Index **Ah, // row indices GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of rows that appear in Ah bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_HyperCSC // historical: use GxB_Matrix_unpack_HyperCSC ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // column "pointers" GrB_Index **Ah, // column indices GrB_Index **Ai, // row indices void 
**Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of columns that appear in Ah bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_HyperCSC // unpack a hypersparse CSC matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // column "pointers" GrB_Index **Ah, // column indices GrB_Index **Ai, // row indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of columns that appear in Ah bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_BitmapR // historical: use GxB_Matrix_unpack_BitmapR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_BitmapR // unpack a bitmap matrix, by row ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_BitmapC // historical: use GxB_Matrix_unpack_BitmapC 
( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_BitmapC // unpack a bitmap matrix, by col ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_FullR // historical: use GxB_Matrix_unpack_FullR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_FullR // unpack a full matrix, by row ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_FullC // historical: use GxB_Matrix_unpack_FullC ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_FullC // unpack a full matrix, by column ( 
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_export_CSC // historical: use GxB_Vector_unpack_CSC ( GrB_Vector *v, // handle of vector to export and free GrB_Type *type, // type of vector exported GrB_Index *n, // length of the vector GrB_Index **vi, // indices void **vx, // values GrB_Index *vi_size, // size of vi in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in vector bool *jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_unpack_CSC // unpack a CSC vector ( GrB_Vector v, // vector to unpack (type and length unchanged) GrB_Index **vi, // indices void **vx, // values GrB_Index *vi_size, // size of vi in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in vector bool *jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_export_Bitmap // historical: use GxB_Vector_unpack_Bitmap ( GrB_Vector *v, // handle of vector to export and free GrB_Type *type, // type of vector exported GrB_Index *n, // length of the vector int8_t **vb, // bitmap void **vx, // values GrB_Index *vb_size, // size of vb in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_unpack_Bitmap // unpack a bitmap vector ( GrB_Vector v, // vector to unpack (type and length unchanged) int8_t **vb, // bitmap void **vx, // values GrB_Index *vb_size, // size of vb in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_export_Full // 
historical: use GxB_Vector_unpack_Full ( GrB_Vector *v, // handle of vector to export and free GrB_Type *type, // type of vector exported GrB_Index *n, // length of the vector void **vx, // values GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_unpack_Full // unpack a full vector ( GrB_Vector v, // vector to unpack (type and length unchanged) void **vx, // values GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso const GrB_Descriptor desc ) ; //============================================================================== // GrB import/export //============================================================================== // The GrB_Matrix_import method copies from user-provided arrays into an // opaque GrB_Matrix and GrB_Matrix_export copies data out, from an opaque // GrB_Matrix into user-provided arrays. Unlike the GxB pack/unpack methods, // memory is not handed off between the user application and GraphBLAS. // These methods are much slower than the GxB pack/unpack methods, since they // require a copy of the data to be made. GrB_Matrix_import also must assume // its input data cannot be trusted, and so it does extensive checks. The GxB // pack takes O(1) time in all cases (unless it is told the input data is // untrusted, via the descriptor). GxB unpack takes O(1) time unless the // matrix is exported in a different format than it currently has. // No typecasting of the values is done on import or export. 
// The GrB C API specification supports 3 formats: typedef enum { GrB_CSR_FORMAT = 0, // CSR format (equiv to GxB_SPARSE with GxB_BY_ROW) GrB_CSC_FORMAT = 1, // CSC format (equiv to GxB_SPARSE with GxB_BY_COL) GrB_COO_FORMAT = 2 // triplet format (like input to GrB*build) } GrB_Format ; GB_PUBLIC GrB_Info GrB_Matrix_import_BOOL // import a GrB_BOOL matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_BOOL) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const bool *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT8 // import a GrB_INT8 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_iNT8) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const int8_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT16 // import a GrB_INT16 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_INT16) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const 
int16_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT32 // import a GrB_INT32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_INT32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const int32_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT64 // import a GrB_INT64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_INT64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const int64_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT8 // import a GrB_UINT8 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT8) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint8_t *Ax, // values 
GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT16 // import a GrB_UINT16 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT16) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint16_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT32 // import a GrB_UINT32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint32_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT64 // import a GrB_UINT64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint64_t *Ax, // values GrB_Index Ap_len, 
// number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_FP32 // import a GrB_FP32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_FP32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const float *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_FP64 // import a GrB_FP64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_FP64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const double *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GxB_Matrix_import_FC32 // import a GxB_FC32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GxB_FC32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const GxB_FC32_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of 
bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GxB_Matrix_import_FC64 // import a GxB_FC64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GxB_FC64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const GxB_FC64_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UDT // import a matrix with a user-defined type ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const void *Ax, // values (must match the type parameter) GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; #if GxB_STDC_VERSION >= 201112L #define GrB_Matrix_import(A,type,nrows,ncols,Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt)\ _Generic \ ( \ (Ax), \ GB_CASES (*, GrB, Matrix_import) \ ) \ (A, type, nrows, ncols, Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt) #endif // For GrB_Matrix_export_T: on input, Ap_len, Ai_len, and Ax_len are // the size of the 3 arrays Ap, Ai, and Ax, in terms of the # of entries. // On output, these 3 values are modified to be the # of entries copied // into those 3 arrays. 
GB_PUBLIC GrB_Info GrB_Matrix_export_BOOL // export a GrB_BOOL matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC bool *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_BOOL) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT8 // export a GrB_INT8 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int8_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT8) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT16 // export a GrB_INT16 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int16_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT16) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT32 // export a GrB_INT32 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int32_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index 
*Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT32) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT64 // export a GrB_INT64 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int64_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT64) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT8 // export a GrB_UINT8 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint8_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT8) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT16 // export a GrB_UINT16 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint16_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT16) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT32 // export a GrB_UINT32 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint32_t *Ax, 
// values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT32) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT64 // export a GrB_UINT64 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint64_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT64) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_FP32 // export a GrB_FP32 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC float *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_FP32) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_FP64 // export a GrB_FP64 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC double *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_FP64) ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_FC32 // export a 
// GxB_FC32 matrix
(
    GrB_Index *Ap,          // pointers for CSR, CSC, column indices for COO
    GrB_Index *Ai,          // col indices for CSR/COO, row indices for CSC
    GxB_FC32_t *Ax,         // values (must match the type of A)
    GrB_Index *Ap_len,      // number of entries in Ap (not # of bytes)
    GrB_Index *Ai_len,      // number of entries in Ai (not # of bytes)
    GrB_Index *Ax_len,      // number of entries in Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A            // matrix to export (must be of type GxB_FC32)
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_export_FC64     // export a GxB_FC64 matrix
(
    GrB_Index *Ap,          // pointers for CSR, CSC, column indices for COO
    GrB_Index *Ai,          // col indices for CSR/COO, row indices for CSC
    GxB_FC64_t *Ax,         // values (must match the type of A)
    GrB_Index *Ap_len,      // number of entries in Ap (not # of bytes)
    GrB_Index *Ai_len,      // number of entries in Ai (not # of bytes)
    GrB_Index *Ax_len,      // number of entries in Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A            // matrix to export (must be of type GxB_FC64)
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_export_UDT      // export a matrix with a user-defined type
(
    GrB_Index *Ap,          // pointers for CSR, CSC, column indices for COO
    GrB_Index *Ai,          // col indices for CSR/COO, row indices for CSC
    void *Ax,               // values (must match the type of A)
    GrB_Index *Ap_len,      // number of entries in Ap (not # of bytes)
    GrB_Index *Ai_len,      // number of entries in Ai (not # of bytes)
    GrB_Index *Ax_len,      // number of entries in Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A            // matrix to export
) ;

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_export(Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt,A) \
    _Generic                                                    \
    (                                                           \
        (Ax),                                                   \
        GB_CASES (*, GrB, Matrix_export)                        \
    )                                                           \
    (Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt, A)
#endif

GB_PUBLIC
GrB_Info GrB_Matrix_exportSize  // determine sizes of user arrays for export
(
    GrB_Index *Ap_len,      // # of entries required for Ap (not # of bytes)
    GrB_Index *Ai_len,      // # of entries required for Ai (not # of bytes)
    GrB_Index
*Ax_len, // # of entries required for Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export ) ; GB_PUBLIC GrB_Info GrB_Matrix_exportHint // suggest the best export format ( GrB_Format *format, // export format GrB_Matrix A // matrix to export ) ; //============================================================================== // serialize/deserialize //============================================================================== // GxB_Matrix_serialize copies the contents of a GrB_Matrix into a single array // of bytes (the "blob"). The contents of the blob are implementation // dependent. The blob can be saved to a file, or sent across a communication // channel, and then a GrB_Matrix can be reconstructed from the blob, even on // another process or another machine, using the same version of // SuiteSparse:GraphBLAS (v5.2.0 or later). The goal is that future versions // of SuiteSparse:GraphBLAS should be able to read in the blob as well, and // reconstruct a matrix. The matrix can be reconstructed from the blob using // GxB_Matrix_deserialize. The blob is compressed, by default, and // uncompressed by GxB_Matrix_deserialize. // GrB_Matrix_serialize/deserialize are slightly different from their GxB* // counterparts. The blob is allocated by GxB_Matrix_serialize, and must be // freed by GxB_serialize_free (which calls the ANSI C11 free if GrB_init was // used). By contrast, the GrB* methods require the user application to pass // in a preallocated blob to GrB_Matrix_serialize, whose size can be given by // GrB_Matrix_serializeSize (as a loose upper bound). // The GrB* and GxB* methods can be mixed. GrB_Matrix_serialize and // GxB_Matrix_serialize construct the same blob (assuming they are given the // same # of threads to do the work). Both GrB_Matrix_deserialize and // GxB_Matrix_deserialize can deserialize a blob coming from either // GrB_Matrix_serialize or GxB_Matrix_serialize. 
// Deserialization of untrusted data is a common security problem; see // https://cwe.mitre.org/data/definitions/502.html. The deserialization methods // below do a few basic checks so that no out-of-bounds access occurs during // deserialization, but the output matrix itself may still be corrupted. If // the data is untrusted, use this to check the matrix: // GxB_Matrix_fprint (A, "A deserialized", GrB_SILENT, NULL) // Example usage: /* //-------------------------------------------------------------------------- // using GxB serialize/deserialize //-------------------------------------------------------------------------- // Given a GrB_Matrix A: assuming a user-defined type: void *blob ; GrB_Index blob_size ; GxB_Matrix_serialize (&blob, &blob_size, A, NULL) ; FILE *f = fopen ("myblob", "w") ; fwrite (blob_size, sizeof (size_t), 1, f) ; fwrite (blob, sizeof (uint8_t), blob_size, f) ; fclose (f) ; GrB_Matrix_free (&A) ; // B is a copy of A GxB_Matrix_deserialize (&B, MyQtype, blob, blob_size, NULL) ; GrB_Matrix_free (&B) ; free (blob) ; GrB_finalize ( ) ; // --- in another process, to recreate the GrB_Matrix A: GrB_init (GrB_NONBLOCKING) ; FILE *f = fopen ("myblob", "r") ; fread (&blob_size, sizeof (size_t), 1, f) ; blob = malloc (blob_size) ; fread (blob, sizeof (uint8_t), blob_size, f) ; fclose (f) ; char type_name [GxB_MAX_NAME_LEN] ; GxB_deserialize_type_name (type_name, blob, blob_size) ; printf ("blob type is: %s\n", type_name) ; GrB_Type user_type = NULL ; if (strncmp (type_name, "myquaternion", GxB_MAX_NAME_LEN) == 0) user_type = MyQtype ; GxB_Matrix_deserialize (&A, user_type, blob, blob_size, NULL) ; free (blob) ; // note, freed by the user, not GraphBLAS //-------------------------------------------------------------------------- // using GrB serialize/deserialize //-------------------------------------------------------------------------- // Given a GrB_Matrix A: assuming a user-defined type, MyQType: void *blob = NULL ; GrB_Index blob_size = 0 ; 
GrB_Matrix A, B = NULL ; // construct a matrix A, then serialized it: GrB_Matrix_serializeSize (&blob_size, A) ; // loose upper bound blob = malloc (blob_size) ; GrB_Matrix_serialize (blob, &blob_size, A) ; // returns actual size blob = realloc (blob, blob_size) ; // user can shrink the blob FILE *f = fopen ("myblob", "w") ; fwrite (blob_size, sizeof (size_t), 1, f) ; fwrite (blob, sizeof (uint8_t), blob_size, f) ; fclose (f) ; GrB_Matrix_free (&A) ; // B is a copy of A: GrB_Matrix_deserialize (&B, MyQtype, blob, blob_size) ; GrB_Matrix_free (&B) ; free (blob) ; GrB_finalize ( ) ; // --- in another process, to recreate the GrB_Matrix A: GrB_init (GrB_NONBLOCKING) ; FILE *f = fopen ("myblob", "r") ; fread (&blob_size, sizeof (size_t), 1, f) ; blob = malloc (blob_size) ; fread (blob, sizeof (uint8_t), blob_size, f) ; fclose (f) ; // the user must know the type of A is MyQType GrB_Matrix_deserialize (&A, MyQtype, blob, blob_size) ; free (blob) ; */ // Three methods are currently implemented: no compression, LZ4, and LZ4HC #define GxB_COMPRESSION_NONE -1 // no compression #define GxB_COMPRESSION_DEFAULT 0 // LZ4 #define GxB_COMPRESSION_LZ4 1000 // LZ4 #define GxB_COMPRESSION_LZ4HC 2000 // LZ4HC, with default level 9 // possible future methods that could be added: // #define GxB_COMPRESSION_ZLIB 3000 // ZLIB, with default level 6 // #define GxB_COMPRESSION_LZO 4000 // LZO, with default level 2 // #define GxB_COMPRESSION_BZIP2 5000 // BZIP2, with default level 9 // #define GxB_COMPRESSION_LZSS 6000 // LZSS // using the Intel IPP versions, if available (not yet supported); #define GxB_COMPRESSION_INTEL 1000000 // Most of the above methods have a level parameter that controls the tradeoff // between run time and the amount of compression obtained. 
Higher levels // result in a more compact result, at the cost of higher run time: // LZ4 no level setting // LZ4HC 1: fast, 9: default, 9: max // these methos are not yet supported but may be added in the future: // ZLIB 1: fast, 6: default, 9: max // LZO 1: fast (X1ST), 2: default (XST) // BZIP2 1: fast, 9: default, 9: max // LZSS no level setting // For all methods, a level of zero results in the default level setting. // These settings can be added, so to use LZ4HC at level 5, use method = // GxB_COMPRESSION_LZ4HC + 5. // If the Intel IPPS compression methods are available, they can be selected // by adding GxB_COMPRESSION_INTEL. For example, to use the Intel IPPS // implementation of LZ4HC at level 9, use method = GxB_COMPRESSION_INTEL + // GxB_COMPRESSION_LZ4HC + 9 = 1,002,009. If the Intel methods are requested // but not available, this setting is ignored and the non-Intel methods are // used instead. // If the level setting is out of range, the default is used for that method. // If the method is negative, no compression is performed. If the method is // positive but unrecognized, the default is used (GxB_COMPRESSION_LZ4, with no // level setting, and the non-Intel version). // If a method is not implemented, LZ4 is used instead, and the level setting // is ignored. GB_PUBLIC GrB_Info GxB_Matrix_serialize // serialize a GrB_Matrix to a blob ( // output: void **blob_handle, // the blob, allocated on output GrB_Index *blob_size_handle, // size of the blob on output // input: GrB_Matrix A, // matrix to serialize const GrB_Descriptor desc // descriptor to select compression method // and to control # of threads used ) ; GB_PUBLIC GrB_Info GrB_Matrix_serialize // serialize a GrB_Matrix to a blob ( // output: void *blob, // the blob, already allocated in input // input/output: GrB_Index *blob_size_handle, // size of the blob on input. On output, // the # of bytes used in the blob. 
// input: GrB_Matrix A // matrix to serialize ) ; GB_PUBLIC GrB_Info GxB_Vector_serialize // serialize a GrB_Vector to a blob ( // output: void **blob_handle, // the blob, allocated on output GrB_Index *blob_size_handle, // size of the blob on output // input: GrB_Vector u, // vector to serialize const GrB_Descriptor desc // descriptor to select compression method // and to control # of threads used ) ; GB_PUBLIC GrB_Info GrB_Matrix_serializeSize // estimate the size of a blob ( // output: GrB_Index *blob_size_handle, // upper bound on the required size of the // blob on output. // input: GrB_Matrix A // matrix to serialize ) ; // The GrB* and GxB* deserialize methods are nearly identical. The GxB* // deserialize methods simply add the descriptor, which allows for optional // control of the # of threads used to deserialize the blob. GB_PUBLIC GrB_Info GxB_Matrix_deserialize // deserialize blob into a GrB_Matrix ( // output: GrB_Matrix *C, // output matrix created from the blob // input: GrB_Type type, // type of the matrix C. Required if the blob holds a // matrix of user-defined type. May be NULL if blob // holds a built-in type; otherwise must match the // type of C. const void *blob, // the blob GrB_Index blob_size, // size of the blob const GrB_Descriptor desc // to control # of threads used ) ; GB_PUBLIC GrB_Info GrB_Matrix_deserialize // deserialize blob into a GrB_Matrix ( // output: GrB_Matrix *C, // output matrix created from the blob // input: GrB_Type type, // type of the matrix C. Required if the blob holds a // matrix of user-defined type. May be NULL if blob // holds a built-in type; otherwise must match the // type of C. const void *blob, // the blob GrB_Index blob_size // size of the blob ) ; GB_PUBLIC GrB_Info GxB_Vector_deserialize // deserialize blob into a GrB_Vector ( // output: GrB_Vector *w, // output vector created from the blob // input: GrB_Type type, // type of the vector w. Required if the blob holds a // vector of user-defined type. 
May be NULL if blob // holds a built-in type; otherwise must match the // type of w. const void *blob, // the blob GrB_Index blob_size, // size of the blob const GrB_Descriptor desc // to control # of threads used ) ; // GxB_deserialize_type_name extracts the type_name of the GrB_Type of the // GrB_Matrix or GrB_Vector held in a serialized blob. On input, type_name // must point to a user-owned char array of size at least GxB_MAX_NAME_LEN (it // must not point into the blob itself). On output, type_name will contain a // null-terminated string with the corresponding C type name. If the blob // holds a matrix of a built-in type, the name is returned as "bool" for // GrB_BOOL, "uint8_t" for GrB_UINT8, "float complex" for GxB_FC32, etc. // See GxB_Type_name to convert this name into a GrB_Type. GB_PUBLIC GrB_Info GxB_deserialize_type_name // return the type name of a blob ( // output: char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). // input, not modified: const void *blob, // the blob GrB_Index blob_size // size of the blob ) ; //============================================================================== // GxB_Vector_sort and GxB_Matrix_sort: sort a matrix or vector //============================================================================== GB_PUBLIC GrB_Info GxB_Vector_sort ( // output: GrB_Vector w, // vector of sorted values GrB_Vector p, // vector containing the permutation // input GrB_BinaryOp op, // comparator op GrB_Vector u, // vector to sort const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_sort ( // output: GrB_Matrix C, // matrix of sorted values GrB_Matrix P, // matrix containing the permutations // input GrB_BinaryOp op, // comparator op GrB_Matrix A, // matrix to sort const GrB_Descriptor desc ) ; #define GxB_sort(arg1,...) 
\ _Generic \ ( \ (arg1), \ GrB_Vector : GxB_Vector_sort , \ GrB_Matrix : GxB_Matrix_sort \ ) \ (arg1, __VA_ARGS__) //============================================================================== // GxB_Iterator: an object that iterates over the entries of a matrix or vector //============================================================================== /* Example usage: single thread iteration of a whole matrix, one row at a time (in the outer loop), and one entry at a time within the row (in the inner loop): // create an iterator GxB_Iterator iterator ; GxB_Iterator_new (&iterator) ; // attach it to the matrix A, known to be type GrB_FP64 GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ; if (info < 0) { handle the failure ... } // seek to A(0,:) info = GxB_rowIterator_seekRow (iterator, 0) ; while (info != GxB_EXHAUSTED) { // iterate over entries in A(i,:) GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ; while (info == GrB_SUCCESS) { // get the entry A(i,j) GrB_Index j = GxB_rowIterator_getColIndex (iterator) ; double aij = GxB_Iterator_get_FP64 (iterator) ; // move to the next entry in A(i,:) info = GxB_rowIterator_nextCol (iterator) ; } // move to the next row, A(i+1,:) info = GxB_rowIterator_nextRow (iterator) ; } GrB_free (&iterator) ; parallel iteration using 4 threads (work may be imbalanced however): GrB_Index nrows ; GrB_wait (A, GrB_MATERIALIZE) ; // this is essential GrB_Matrix_nrows (&nrows, A) ; #pragma omp parallel for num_threads(4) for (int tid = 0 ; tid < 4 ; tid++) { // thread tid operates on A(row1:row2-1,:) GrB_Index row1 = tid * (nrows / 4) ; GrB_Index row2 = (tid == 3) ? nrows : ((tid+1) * (nrows / 4)) ; GxB_Iterator iterator ; GxB_Iterator_new (&iterator) ; GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ; if (info < 0) { handle the failure ... 
} // seek to A(row1,:) info = GxB_rowIterator_seekRow (iterator, row1) ; while (info != GxB_EXHAUSTED) { // iterate over entries in A(i,:) GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ; if (i >= row2) break ; while (info == GrB_SUCCESS) { // get the entry A(i,j) GrB_Index j = GxB_rowIterator_getColIndex (iterator) ; double aij = GxB_Iterator_get_FP64 (iterator) ; // move to the next entry in A(i,:) info = GxB_rowIterator_nextCol (iterator) ; } // move to the next row, A(i+1,:) info = GxB_rowIterator_nextRow (iterator) ; } GrB_free (&iterator) ; } In the parallel example above, a more balanced work distribution can be obtained by first computing the row degree via GrB_mxv (see LAGraph), and then compute the cumulative sum (ideally in parallel). Next, partition the cumulative sum into one part per thread via binary search, and divide the rows into parts accordingly. */ //------------------------------------------------------------------------------ // GxB_Iterator: definition and new/free methods //------------------------------------------------------------------------------ // The contents of an iterator must not be directly accessed by the user // application. Only the functions and macros provided here may access // "iterator->..." contents. The iterator is defined here only so that macros // can be used to speed up the use of the iterator methods. User applications // must not use "iterator->..." directly. 
struct GB_Iterator_opaque
{
    // Opaque state of a GxB_Iterator.  User applications must never access
    // these fields directly; only the GxB_*Iterator_* methods and the macros
    // in this header may read or write "iterator->...".

    // these components change as the iterator moves (via seek or next):
    int64_t pstart ;            // the start of the current vector
    int64_t pend ;              // the end of the current vector
    int64_t p ;                 // position of the current entry
    int64_t k ;                 // the current vector

    // only changes when the iterator is created:
    size_t header_size ;        // size of this iterator object

    // these components only change when the iterator is attached:
    int64_t pmax ;              // avlen*avdim for bitmap; nvals(A) otherwise
    int64_t avlen ;             // length of each vector in the matrix
    int64_t avdim ;             // number of vectors in the matrix dimension
    int64_t anvec ;             // # of vectors present in the matrix
    const int64_t *GB_restrict Ap ;     // pointers for sparse and hypersparse
    const int64_t *GB_restrict Ah ;     // vector names for hypersparse
    const int8_t *GB_restrict Ab ;      // bitmap
    const int64_t *GB_restrict Ai ;     // indices for sparse and hypersparse
    const void *GB_restrict Ax ;        // values for all 4 data structures
    size_t type_size ;          // size of the type of A
    int A_sparsity ;            // sparse, hyper, bitmap, or full
    bool iso ;                  // true if A is iso-valued, false otherwise
    bool by_col ;               // true if A is held by column, false if by row
} ;

// GxB_Iterator: a handle to an iterator over the entries of a matrix or
// vector.  The struct above is exposed only so the access methods below can
// be implemented as macros; it is not part of the public contract.
typedef struct GB_Iterator_opaque *GxB_Iterator ;

// GxB_Iterator_new: create a new iterator, not attached to any matrix/vector
GB_PUBLIC
GrB_Info GxB_Iterator_new (GxB_Iterator *iterator) ;

// GxB_Iterator_free: free an iterator
GB_PUBLIC
GrB_Info GxB_Iterator_free (GxB_Iterator *iterator) ;

//==============================================================================
// GB_Iterator_*: implements user-callable GxB_*Iterator_* methods
//==============================================================================

// GB_* methods are not user-callable.  These methods appear here so that the
// iterator methods can be done via macros.
//------------------------------------------------------------------------------
// GB_Iterator_attach: attach a row/col/entry iterator to a matrix
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GB_Iterator_attach
(
    GxB_Iterator iterator,      // iterator to attach to the matrix A
    GrB_Matrix A,               // matrix to attach
    GxB_Format_Value format,    // by row, by col, or by entry (GxB_NO_FORMAT)
    GrB_Descriptor desc
) ;

//------------------------------------------------------------------------------
// GB_Iterator_rc_seek: seek a row/col iterator to a particular vector
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GB_Iterator_rc_seek
(
    GxB_Iterator iterator,
    GrB_Index j,
    bool jth_vector
) ;

//------------------------------------------------------------------------------
// GB_Iterator_rc_bitmap_next: move a row/col iterator to next entry in bitmap
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GB_Iterator_rc_bitmap_next (GxB_Iterator iterator) ;

//------------------------------------------------------------------------------
// GB_Iterator_rc_knext: move a row/col iterator to the next vector
//------------------------------------------------------------------------------

// Implemented as a single comma/ternary expression so the macro can be used
// where an expression of type GrB_Info is expected.

#define GB_Iterator_rc_knext(iterator)                                        \
(                                                                             \
    /* move to the next vector, and check if iterator is exhausted */         \
    (++(iterator->k) >= iterator->anvec) ?                                    \
    (                                                                         \
        /* iterator is at the end of the matrix */                            \
        iterator->pstart = 0,                                                 \
        iterator->pend = 0,                                                   \
        iterator->p = 0,                                                      \
        iterator->k = iterator->anvec,                                        \
        GxB_EXHAUSTED                                                         \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        /* find first entry in vector, and pstart/pend for this vector */     \
        (iterator->A_sparsity <= GxB_SPARSE) ?                                \
        (                                                                     \
            /* matrix is sparse or hypersparse */                             \
            iterator->pstart = iterator->Ap [iterator->k],                    \
            iterator->pend = iterator->Ap [iterator->k+1],                    \
            iterator->p = iterator->pstart,                                   \
            ((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS)    \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            /* matrix is bitmap or full */                                    \
            iterator->pstart += iterator->avlen,                              \
            iterator->pend += iterator->avlen,                                \
            iterator->p = iterator->pstart,                                   \
            (iterator->A_sparsity <= GxB_BITMAP) ?                            \
            (                                                                 \
                /* matrix is bitmap */                                        \
                GB_Iterator_rc_bitmap_next (iterator)                         \
            )                                                                 \
            :                                                                 \
            (                                                                 \
                /* matrix is full */                                          \
                ((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
            )                                                                 \
        )                                                                     \
    )                                                                         \
)

//------------------------------------------------------------------------------
// GB_Iterator_rc_inext: move a row/col iterator to the next entry in the vector
//------------------------------------------------------------------------------

#define GB_Iterator_rc_inext(iterator)                                        \
(                                                                             \
    /* move to the next entry in the vector */                                \
    (++(iterator->p) >= iterator->pend) ?                                     \
    (                                                                         \
        /* no more entries in the current vector */                           \
        GrB_NO_VALUE                                                          \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        (iterator->A_sparsity == GxB_BITMAP) ?                                \
        (                                                                     \
            /* the matrix is in bitmap form */                                \
            GB_Iterator_rc_bitmap_next (iterator)                             \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            GrB_SUCCESS                                                       \
        )                                                                     \
    )                                                                         \
)

//------------------------------------------------------------------------------
// GB_Iterator_rc_getj: get index of current vector for row/col iterator
//------------------------------------------------------------------------------

#define GB_Iterator_rc_getj(iterator)                                         \
(                                                                             \
    (iterator->k >= iterator->anvec) ?                                        \
    (                                                                         \
        /* iterator is past the end of the matrix */                          \
        iterator->avdim                                                       \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        (iterator->A_sparsity == GxB_HYPERSPARSE) ?                           \
        (                                                                     \
            /* return the name of kth vector: j = Ah [k] if it appears */     \
            iterator->Ah [iterator->k]                                        \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            /* return the kth vector: j = k */                                \
            iterator->k                                                       \
        )                                                                     \
    )                                                                         \
)

//------------------------------------------------------------------------------
// GB_Iterator_rc_geti: return index of current entry for row/col iterator
//------------------------------------------------------------------------------

#define GB_Iterator_rc_geti(iterator)                                         \
(                                                                             \
    (iterator->Ai != NULL) ?                                                  \
    (                                                                         \
        iterator->Ai [iterator->p]                                            \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        /* bitmap/full: the index is implicit in the position p */            \
        (iterator->p - iterator->pstart)                                      \
    )                                                                         \
)

//==============================================================================
// GxB_rowIterator_*: iterate over the rows of a matrix
//==============================================================================

#undef GxB_rowIterator_attach
#undef GxB_rowIterator_kount
#undef GxB_rowIterator_seekRow
#undef GxB_rowIterator_kseek
#undef GxB_rowIterator_nextRow
#undef GxB_rowIterator_nextCol
#undef GxB_rowIterator_getRowIndex
#undef GxB_rowIterator_getColIndex

//------------------------------------------------------------------------------
// GxB_rowIterator_attach: attach a row iterator to a matrix
//------------------------------------------------------------------------------

// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.

// GxB_rowIterator_attach attaches a row iterator to a matrix.  If the iterator
// is already attached to a matrix, it is detached and then attached to the
// given matrix A.

// The following error conditions are returned:
// GrB_NULL_POINTER:    if the iterator or A are NULL.
// GrB_INVALID_OBJECT:  if the matrix A is invalid.
// GrB_NOT_IMPLEMENTED: if the matrix A cannot be iterated by row.
// GrB_OUT_OF_MEMORY:   if the method runs out of memory.

// If successful, the row iterator is attached to the matrix, but not to any
// specific row.  Use GxB_rowIterator_*seek* to move the iterator to a row.
GB_PUBLIC
GrB_Info GxB_rowIterator_attach
(
    GxB_Iterator iterator,
    GrB_Matrix A,
    GrB_Descriptor desc
) ;

#define GxB_rowIterator_attach(iterator, A, desc)                             \
(                                                                             \
    GB_Iterator_attach (iterator, A, GxB_BY_ROW, desc)                        \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_kount: upper bound on the # of nonempty rows of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.

// GxB_rowIterator_kount returns an upper bound on the # of non-empty rows of a
// matrix.  A GraphBLAS library may always return this as simply nrows(A), but
// in some libraries, it may be a value between the # of rows with at least one
// entry, and nrows(A), inclusive.  Any value in this range is a valid return
// value from this function.

// For SuiteSparse:GraphBLAS: If A is m-by-n, and sparse, bitmap, or full, then
// kount == m.  If A is hypersparse, kount is the # of vectors held in the data
// structure for the matrix, some of which may be empty, and kount <= m.

GB_PUBLIC
GrB_Index GxB_rowIterator_kount (GxB_Iterator iterator) ;

#define GxB_rowIterator_kount(iterator)                                       \
(                                                                             \
    (iterator)->anvec                                                         \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_seekRow: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.

// GxB_rowIterator_seekRow moves a row iterator to the first entry of A(row,:).
// If A(row,:) has no entries, the iterator may move to the first entry of next
// nonempty row i for some i > row.  The row index can be determined by
// GxB_rowIterator_getRowIndex.

// For SuiteSparse:GraphBLAS: If the matrix is hypersparse, and the row
// does not appear in the hyperlist, then the iterator is moved to the first
// row after the given row that does appear in the hyperlist.

// The method is always successful; the following conditions are returned:
// GxB_EXHAUSTED:   if the row index is >= nrows(A); the row iterator is
//                  exhausted, but is still attached to the matrix.
// GrB_NO_VALUE:    if the row index is valid but A(row,:) has no entries; the
//                  row iterator is positioned at A(row,:).
// GrB_SUCCESS:     if the row index is valid and A(row,:) has at least one
//                  entry.  The row iterator is positioned at A(row,:).
//                  GxB_rowIterator_get* can be used to return the indices of
//                  the first entry in A(row,:), and GxB_Iterator_get* can
//                  return its value.

GB_PUBLIC
GrB_Info GxB_rowIterator_seekRow (GxB_Iterator iterator, GrB_Index row) ;

#define GxB_rowIterator_seekRow(iterator, row)                                \
(                                                                             \
    GB_Iterator_rc_seek (iterator, row, false)                                \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_kseek: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.

// GxB_rowIterator_kseek is identical to GxB_rowIterator_seekRow, except for
// how the row index is specified.  The row is the kth non-empty row of A.
// More precisely, k is in the range 0 to kount-1, where kount is the value
// returned by GxB_rowIterator_kount.
GB_PUBLIC
GrB_Info GxB_rowIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;

#define GxB_rowIterator_kseek(iterator, k)                                    \
(                                                                             \
    GB_Iterator_rc_seek (iterator, k, true)                                   \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_nextRow: move a row iterator to the next row of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.

// If the row iterator is currently at A(row,:), it is moved to A(row+1,:),
// or to the first non-empty row after A(row,:), at the discretion of this
// method.  That is, empty rows may be skipped.

// The method is always successful, and the return conditions are identical to
// the return conditions of GxB_rowIterator_seekRow.

GB_PUBLIC
GrB_Info GxB_rowIterator_nextRow (GxB_Iterator iterator) ;

#define GxB_rowIterator_nextRow(iterator)                                     \
(                                                                             \
    GB_Iterator_rc_knext (iterator)                                           \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_nextCol: move a row iterator to the next entry in A(row,:)
//------------------------------------------------------------------------------

// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.

// The method is always successful, and returns the following conditions:
// GrB_NO_VALUE:    If the iterator is already exhausted, or if there is no
//                  entry in the current A(row,:).
// GrB_SUCCESS:     If the row iterator has been moved to the next entry in
//                  A(row,:).

GB_PUBLIC
GrB_Info GxB_rowIterator_nextCol (GxB_Iterator iterator) ;

#define GxB_rowIterator_nextCol(iterator)                                     \
(                                                                             \
    GB_Iterator_rc_inext ((iterator))                                         \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_getRowIndex: get current row index of a row iterator
//------------------------------------------------------------------------------

// On input, the iterator must be already successfully attached to matrix as a
// row iterator; results are undefined if this condition is not met.

// The method returns nrows(A) if the iterator is exhausted, or the current
// row index otherwise.  There need not be any entry in the current row.
// Zero is returned if the iterator is attached to the matrix but
// GxB_rowIterator_*seek* has not been called, but this does not mean the
// iterator is positioned at row zero.

GB_PUBLIC
GrB_Index GxB_rowIterator_getRowIndex (GxB_Iterator iterator) ;

#define GxB_rowIterator_getRowIndex(iterator)                                 \
(                                                                             \
    GB_Iterator_rc_getj ((iterator))                                          \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_getColIndex: get current column index of a row iterator
//------------------------------------------------------------------------------

// On input, the iterator must be already successfully attached to matrix as a
// row iterator, and in addition, the row iterator must be positioned at a
// valid entry present in the matrix.  That is, the last call to
// GxB_rowIterator_*seek* or GxB_rowIterator_*next*, must have returned
// GrB_SUCCESS.  Results are undefined if this condition is not met.
GB_PUBLIC GrB_Index GxB_rowIterator_getColIndex (GxB_Iterator iterator) ; #define GxB_rowIterator_getColIndex(iterator) \ ( \ GB_Iterator_rc_geti ((iterator)) \ ) //============================================================================== // GxB_colIterator_*: iterate over columns of a matrix //============================================================================== // The column iterator is analoguous to the row iterator. #undef GxB_colIterator_attach #undef GxB_colIterator_kount #undef GxB_colIterator_seekCol #undef GxB_colIterator_kseek #undef GxB_colIterator_nextCol #undef GxB_colIterator_nextRow #undef GxB_colIterator_getColIndex #undef GxB_colIterator_getRowIndex // GxB_colIterator_attach: attach a column iterator to a matrix GB_PUBLIC GrB_Info GxB_colIterator_attach ( GxB_Iterator iterator, GrB_Matrix A, GrB_Descriptor desc ) ; #define GxB_colIterator_attach(iterator, A, desc) \ ( \ GB_Iterator_attach (iterator, A, GxB_BY_COL, desc) \ ) // GxB_colIterator_kount: return # of nonempty columns of the matrix GB_PUBLIC GrB_Index GxB_colIterator_kount (GxB_Iterator iterator) ; #define GxB_colIterator_kount(iterator) \ ( \ (iterator)->anvec \ ) // GxB_colIterator_seekCol: move a column iterator to A(:,col) GB_PUBLIC GrB_Info GxB_colIterator_seekCol (GxB_Iterator iterator, GrB_Index col) ; #define GxB_colIterator_seekCol(iterator, col) \ ( \ GB_Iterator_rc_seek (iterator, col, false) \ ) // GxB_colIterator_kseek: move a column iterator to kth non-empty column of A GB_PUBLIC GrB_Info GxB_colIterator_kseek (GxB_Iterator iterator, GrB_Index k) ; #define GxB_colIterator_kseek(iterator, k) \ ( \ GB_Iterator_rc_seek (iterator, k, true) \ ) // GxB_colIterator_nextCol: move a column iterator to first entry of next column GB_PUBLIC GrB_Info GxB_colIterator_nextCol (GxB_Iterator iterator) ; #define GxB_colIterator_nextCol(iterator) \ ( \ GB_Iterator_rc_knext ((iterator)) \ ) // GxB_colIterator_nextRow: move a column iterator to next entry in column GB_PUBLIC 
GrB_Info GxB_colIterator_nextRow (GxB_Iterator iterator) ; #define GxB_colIterator_nextRow(iterator) \ ( \ GB_Iterator_rc_inext ((iterator)) \ ) // GxB_colIterator_getColIndex: return the column index of current entry GB_PUBLIC GrB_Index GxB_colIterator_getColIndex (GxB_Iterator iterator) ; #define GxB_colIterator_getColIndex(iterator) \ ( \ GB_Iterator_rc_getj ((iterator)) \ ) // GxB_colIterator_getRowIndex: return the row index of current entry GB_PUBLIC GrB_Index GxB_colIterator_getRowIndex (GxB_Iterator iterator) ; #define GxB_colIterator_getRowIndex(iterator) \ ( \ GB_Iterator_rc_geti ((iterator)) \ ) //============================================================================== // GxB_Matrix_Iterator_*: iterate over the entries of a matrix //============================================================================== // Example usage: // single thread iteration of a whole matrix, one entry at at time /* // create an iterator GxB_Iterator iterator ; GxB_Iterator_new (&iterator) ; // attach it to the matrix A, known to be type GrB_FP64 GrB_Info info = GxB_Matrix_Iterator_attach (iterator, A, NULL) ; if (info < 0) { handle the failure ... } // seek to the first entry info = GxB_Matrix_Iterator_seek (iterator, 0) ; while (info != GxB_EXHAUSTED) { // get the entry A(i,j) GrB_Index i, j ; GxB_Matrix_Iterator_getIndex (iterator, &i, &j) ; double aij = GxB_Iterator_get_FP64 (iterator) ; // move to the next entry in A info = GxB_Matrix_Iterator_next (iterator) ; } GrB_free (&iterator) ; */ //------------------------------------------------------------------------------ // GxB_Matrix_Iterator_attach: attach an entry iterator to a matrix //------------------------------------------------------------------------------ // On input, the iterator must already exist, having been created by // GxB_Iterator_new. // GxB_Matrix_Iterator_attach attaches an entry iterator to a matrix. 
If the // iterator is already attached to a matrix, it is detached and then attached // to the given matrix A. // The following error conditions are returned: // GrB_NULL_POINTER: if the iterator or A are NULL. // GrB_INVALID_OBJECT: if the matrix A is invalid. // GrB_OUT_OF_MEMORY: if the method runs out of memory. // If successful, the entry iterator is attached to the matrix, but not to any // specific entry. Use GxB_Matrix_Iterator_*seek* to move the iterator to a // particular entry. GB_PUBLIC GrB_Info GxB_Matrix_Iterator_attach ( GxB_Iterator iterator, GrB_Matrix A, GrB_Descriptor desc ) ; //------------------------------------------------------------------------------ // GxB_Matrix_Iterator_getpmax: return the range of the iterator //------------------------------------------------------------------------------ // On input, the entry iterator must be already attached to a matrix via // GxB_Matrix_Iterator_attach; results are undefined if this condition is not // met. // Entries in a matrix are given an index p, ranging from 0 to pmax-1, where // pmax >= nvals(A). For sparse, hypersparse, and full matrices, pmax is equal // to nvals(A). For an m-by-n bitmap matrix, pmax=m*n, or pmax=0 if the // matrix has no entries. GB_PUBLIC GrB_Index GxB_Matrix_Iterator_getpmax (GxB_Iterator iterator) ; //------------------------------------------------------------------------------ // GxB_Matrix_Iterator_seek: seek to a specific entry //------------------------------------------------------------------------------ // On input, the entry iterator must be already attached to a matrix via // GxB_Matrix_Iterator_attach; results are undefined if this condition is not // met. // The input p is in range 0 to pmax-1, which points to an entry in the matrix, // or p >= pmax if the iterator is exhausted, where pmax is the return value // from GxB_Matrix_Iterator_getpmax. 
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the // matrix, or GxB_EXHAUSTED if the iterator is exhausted. GB_PUBLIC GrB_Info GxB_Matrix_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ; //------------------------------------------------------------------------------ // GxB_Matrix_Iterator_next: move to the next entry of a matrix //------------------------------------------------------------------------------ // On input, the entry iterator must be already attached to a matrix via // GxB_Matrix_Iterator_attach, and the position of the iterator must also have // been defined by a prior call to GxB_Matrix_Iterator_seek or // GxB_Matrix_Iterator_next. Results are undefined if these conditions are not // met. // Returns GrB_SUCCESS if the iterator is at an entry that exists in the // matrix, or GxB_EXHAUSTED if the iterator is exhausted. GB_PUBLIC GrB_Info GxB_Matrix_Iterator_next (GxB_Iterator iterator) ; //------------------------------------------------------------------------------ // GxB_Matrix_Iterator_getp: get the current position of a matrix iterator //------------------------------------------------------------------------------ // On input, the entry iterator must be already attached to a matrix via // GxB_Matrix_Iterator_attach, and the position of the iterator must also have // been defined by a prior call to GxB_Matrix_Iterator_seek or // GxB_Matrix_Iterator_next. Results are undefined if these conditions are not // met. 
GB_PUBLIC GrB_Index GxB_Matrix_Iterator_getp (GxB_Iterator iterator) ; //------------------------------------------------------------------------------ // GxB_Matrix_Iterator_getIndex: get the row and column index of a matrix entry //------------------------------------------------------------------------------ // On input, the entry iterator must be already attached to a matrix via // GxB_Matrix_Iterator_attach, and the position of the iterator must also have // been defined by a prior call to GxB_Matrix_Iterator_seek or // GxB_Matrix_Iterator_next, with a return value of GrB_SUCCESS. Results are // undefined if these conditions are not met. GB_PUBLIC void GxB_Matrix_Iterator_getIndex ( GxB_Iterator iterator, GrB_Index *row, GrB_Index *col ) ; //============================================================================== // GxB_Vector_Iterator_*: iterate over the entries of a vector //============================================================================== /* Example usage: single thread iteration of a whole vector, one entry at at time // create an iterator GxB_Iterator iterator ; GxB_Iterator_new (&iterator) ; // attach it to the vector v, known to be type GrB_FP64 GrB_Info info = GxB_Vector_Iterator_attach (iterator, v, NULL) ; if (info < 0) { handle the failure ... 
} // seek to the first entry info = GxB_Vector_Iterator_seek (iterator, 0) ; while (info != GxB_EXHAUSTED) { // get the entry v(i) GrB_Index i = GxB_Vector_Iterator_getIndex (iterator) ; double vi = GxB_Iterator_get_FP64 (iterator) ; // move to the next entry in v info = GxB_Vector_Iterator_next (iterator) ; } GrB_free (&iterator) ; */ #undef GxB_Vector_Iterator_getpmax #undef GxB_Vector_Iterator_seek #undef GxB_Vector_Iterator_next #undef GxB_Vector_Iterator_getp #undef GxB_Vector_Iterator_getIndex //------------------------------------------------------------------------------ // GxB_Vector_Iterator_attach: attach an iterator to a vector //------------------------------------------------------------------------------ // On input, the iterator must already exist, having been created by // GxB_Iterator_new. // GxB_Vector_Iterator_attach attaches an iterator to a vector. If the // iterator is already attached to a vector or matrix, it is detached and then // attached to the given vector v. // The following error conditions are returned: // GrB_NULL_POINTER: if the iterator or v are NULL. // GrB_INVALID_OBJECT: if the vector v is invalid. // GrB_OUT_OF_MEMORY: if the method runs out of memory. // If successful, the iterator is attached to the vector, but not to any // specific entry. Use GxB_Vector_Iterator_seek to move the iterator to a // particular entry. GB_PUBLIC GrB_Info GxB_Vector_Iterator_attach ( GxB_Iterator iterator, GrB_Vector v, GrB_Descriptor desc ) ; //------------------------------------------------------------------------------ // GxB_Vector_Iterator_getpmax: return the range of the vector iterator //------------------------------------------------------------------------------ // On input, the iterator must be already attached to a vector via // GxB_Vector_Iterator_attach; results are undefined if this condition is not // met. // Entries in a vector are given an index p, ranging from 0 to pmax-1, where // pmax >= nvals(v). 
For sparse and full vectors, pmax is equal to nvals(v). // For a size-m bitmap vector, pmax=m, or pmax=0 if the vector has no entries. GB_PUBLIC GrB_Index GxB_Vector_Iterator_getpmax (GxB_Iterator iterator) ; #define GxB_Vector_Iterator_getpmax(iterator) \ ( \ (iterator->pmax) \ ) //------------------------------------------------------------------------------ // GxB_Vector_Iterator_seek: seek to a specific entry in the vector //------------------------------------------------------------------------------ // On input, the iterator must be already attached to a vector via // GxB_Vector_Iterator_attach; results are undefined if this condition is not // met. // The input p is in range 0 to pmax-1, which points to an entry in the vector, // or p >= pmax if the iterator is exhausted, where pmax is the return value // from GxB_Vector_Iterator_getpmax. // Returns GrB_SUCCESS if the iterator is at an entry that exists in the // vector, or GxB_EXHAUSTED if the iterator is exhausted. GB_PUBLIC GrB_Info GB_Vector_Iterator_bitmap_seek (GxB_Iterator iterator, GrB_Index unused) ; // unused parameter to be removed in v8.x GB_PUBLIC GrB_Info GxB_Vector_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ; #define GB_Vector_Iterator_seek(iterator, q) \ ( \ (q >= iterator->pmax) ? \ ( \ /* the iterator is exhausted */ \ iterator->p = iterator->pmax, \ GxB_EXHAUSTED \ ) \ : \ ( \ /* seek to an arbitrary position in the vector */ \ iterator->p = q, \ (iterator->A_sparsity == GxB_BITMAP) ? 
\ ( \ GB_Vector_Iterator_bitmap_seek (iterator, 0) \ ) \ : \ ( \ GrB_SUCCESS \ ) \ ) \ ) #define GxB_Vector_Iterator_seek(iterator, p) \ ( \ GB_Vector_Iterator_seek (iterator, p) \ ) //------------------------------------------------------------------------------ // GxB_Vector_Iterator_next: move to the next entry of a vector //------------------------------------------------------------------------------ // On input, the iterator must be already attached to a vector via // GxB_Vector_Iterator_attach, and the position of the iterator must also have // been defined by a prior call to GxB_Vector_Iterator_seek or // GxB_Vector_Iterator_next. Results are undefined if these conditions are not // met. // Returns GrB_SUCCESS if the iterator is at an entry that exists in the // vector, or GxB_EXHAUSTED if the iterator is exhausted. GB_PUBLIC GrB_Info GxB_Vector_Iterator_next (GxB_Iterator iterator) ; #define GB_Vector_Iterator_next(iterator) \ ( \ /* move to the next entry */ \ (++(iterator->p) >= iterator->pmax) ? \ ( \ /* the iterator is exhausted */ \ iterator->p = iterator->pmax, \ GxB_EXHAUSTED \ ) \ : \ ( \ (iterator->A_sparsity == GxB_BITMAP) ? \ ( \ /* bitmap: seek to the next entry present in the bitmap */ \ GB_Vector_Iterator_bitmap_seek (iterator, 0) \ ) \ : \ ( \ /* other formats: already at the next entry */ \ GrB_SUCCESS \ ) \ ) \ ) #define GxB_Vector_Iterator_next(iterator) \ ( \ GB_Vector_Iterator_next (iterator) \ ) //------------------------------------------------------------------------------ // GxB_Vector_Iterator_getp: get the current position of a vector iterator //------------------------------------------------------------------------------ // On input, the iterator must be already attached to a vector via // GxB_Vector_Iterator_attach, and the position of the iterator must also have // been defined by a prior call to GxB_Vector_Iterator_seek or // GxB_Vector_Iterator_next. Results are undefined if these conditions are not // met. 
GB_PUBLIC GrB_Index GxB_Vector_Iterator_getp (GxB_Iterator iterator) ; #define GxB_Vector_Iterator_getp(iterator) \ ( \ (iterator->p) \ ) //------------------------------------------------------------------------------ // GxB_Vector_Iterator_getIndex: get the index of a vector entry //------------------------------------------------------------------------------ // On input, the iterator must be already attached to a vector via // GxB_Vector_Iterator_attach, and the position of the iterator must also have // been defined by a prior call to GxB_Vector_Iterator_seek or // GxB_Vector_Iterator_next, with a return value of GrB_SUCCESS. Results are // undefined if these conditions are not met. GB_PUBLIC GrB_Index GxB_Vector_Iterator_getIndex (GxB_Iterator iterator) ; #define GxB_Vector_Iterator_getIndex(iterator) \ ( \ ((iterator->Ai != NULL) ? iterator->Ai [iterator->p] : iterator->p) \ ) //============================================================================== // GxB_Iterator_get_TYPE: get value of the current entry for any iterator //============================================================================== // On input, the prior call to GxB_*Iterator_*seek*, or GxB_*Iterator_*next* // must have returned GrB_SUCCESS, indicating that the iterator is at a valid // current entry for either a matrix or vector. // Returns the value of the current entry at the position determined by the // iterator. No typecasting is permitted; the method name must match the // type of the matrix or vector. 
#undef GxB_Iterator_get_BOOL #undef GxB_Iterator_get_INT8 #undef GxB_Iterator_get_INT16 #undef GxB_Iterator_get_INT32 #undef GxB_Iterator_get_INT64 #undef GxB_Iterator_get_UINT8 #undef GxB_Iterator_get_UINT16 #undef GxB_Iterator_get_UINT32 #undef GxB_Iterator_get_UINT64 #undef GxB_Iterator_get_FP32 #undef GxB_Iterator_get_FP64 #undef GxB_Iterator_get_FC32 #undef GxB_Iterator_get_FC64 #undef GxB_Iterator_get_UDT GB_PUBLIC bool GxB_Iterator_get_BOOL (GxB_Iterator iterator) ; GB_PUBLIC int8_t GxB_Iterator_get_INT8 (GxB_Iterator iterator) ; GB_PUBLIC int16_t GxB_Iterator_get_INT16 (GxB_Iterator iterator) ; GB_PUBLIC int32_t GxB_Iterator_get_INT32 (GxB_Iterator iterator) ; GB_PUBLIC int64_t GxB_Iterator_get_INT64 (GxB_Iterator iterator) ; GB_PUBLIC uint8_t GxB_Iterator_get_UINT8 (GxB_Iterator iterator) ; GB_PUBLIC uint16_t GxB_Iterator_get_UINT16 (GxB_Iterator iterator) ; GB_PUBLIC uint32_t GxB_Iterator_get_UINT32 (GxB_Iterator iterator) ; GB_PUBLIC uint64_t GxB_Iterator_get_UINT64 (GxB_Iterator iterator) ; GB_PUBLIC float GxB_Iterator_get_FP32 (GxB_Iterator iterator) ; GB_PUBLIC double GxB_Iterator_get_FP64 (GxB_Iterator iterator) ; GB_PUBLIC GxB_FC32_t GxB_Iterator_get_FC32 (GxB_Iterator iterator) ; GB_PUBLIC GxB_FC64_t GxB_Iterator_get_FC64 (GxB_Iterator iterator) ; GB_PUBLIC void GxB_Iterator_get_UDT (GxB_Iterator iterator, void *value) ; #define GB_Iterator_get(iterator, type) \ ( \ (((type *) (iterator)->Ax) [(iterator)->iso ? 
0 : (iterator)->p]) \ ) #define GxB_Iterator_get_BOOL(iterator) GB_Iterator_get (iterator, bool) #define GxB_Iterator_get_INT8(iterator) GB_Iterator_get (iterator, int8_t) #define GxB_Iterator_get_INT16(iterator) GB_Iterator_get (iterator, int16_t) #define GxB_Iterator_get_INT32(iterator) GB_Iterator_get (iterator, int32_t) #define GxB_Iterator_get_INT64(iterator) GB_Iterator_get (iterator, int64_t) #define GxB_Iterator_get_UINT8(iterator) GB_Iterator_get (iterator, uint8_t) #define GxB_Iterator_get_UINT16(iterator) GB_Iterator_get (iterator, uint16_t) #define GxB_Iterator_get_UINT32(iterator) GB_Iterator_get (iterator, uint32_t) #define GxB_Iterator_get_UINT64(iterator) GB_Iterator_get (iterator, uint64_t) #define GxB_Iterator_get_FP32(iterator) GB_Iterator_get (iterator, float) #define GxB_Iterator_get_FP64(iterator) GB_Iterator_get (iterator, double) #define GxB_Iterator_get_FC32(iterator) GB_Iterator_get (iterator, GxB_FC32_t) #define GxB_Iterator_get_FC64(iterator) GB_Iterator_get (iterator, GxB_FC64_t) #define GxB_Iterator_get_UDT(iterator, value) \ ( \ (void) memcpy ((void *) value, ((const uint8_t *) ((iterator)->Ax)) + \ ((iterator)->iso ? 
0 : ((iterator)->type_size * (iterator)->p)), \ (iterator)->type_size) \ ) //------------------------------------------------------------------------------ // Rapids Memory Manager wrappers for SuiteSparse:GraphBLAS //------------------------------------------------------------------------------ #ifndef RMM_WRAP_H #define RMM_WRAP_H #include <stddef.h> #include <stdio.h> #ifdef __cplusplus extern "C" { #endif // TODO describe the modes typedef enum { rmm_wrap_host=0, rmm_wrap_host_pinned=1, rmm_wrap_device=2, rmm_wrap_managed=3 } RMM_MODE ; void rmm_wrap_finalize (void) ; int rmm_wrap_initialize (RMM_MODE mode, size_t init_pool_size, size_t max_pool_size) ; // example usage: // rmm_wrap_initialize (rmm_wrap_managed, INT32_MAX, INT64_MAX) ; // GxB_init (GrB_NONBLOCKING, rmm_wrap_malloc, rmm_wrap_calloc, rmm_wrap_realloc, rmm_wrap_free) ; // use GraphBLAS ... // GrB_finalize ( ) ; // rmm_wrap_finalize ( ) ; // The two PMR-based allocate/deallocate signatures (C-style): void *rmm_wrap_allocate (size_t *size) ; void rmm_wrap_deallocate (void *p, size_t size) ; // The four malloc/calloc/realloc/free signatures: void *rmm_wrap_malloc (size_t size) ; void *rmm_wrap_calloc (size_t n, size_t size) ; void *rmm_wrap_realloc (void *p, size_t newsize) ; void rmm_wrap_free (void *p) ; #ifdef __cplusplus } #endif #endif #endif
/* ===== file: GB_unop__identity_fc32_uint32.c ===== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated kernel -- only comments are added here; any real
// change belongs in the Generator/ template that produces this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_fc32_uint32
// op(A') function: GB_unop_tran__identity_fc32_uint32

// C type: GxB_FC32_t
// A type: uint32_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise identity with typecast: Cx [p] = GxB_CMPLXF ((float) Ax [p], 0),
// i.e. each uint32_t entry becomes a single-precision complex value with a
// zero imaginary part.  The loop is a static-schedule OpenMP parallel for.
// Cx and Ax may be aliased (the copy is purely elementwise, one read/one
// write per index).  Returns GrB_NO_VALUE when the kernel is compiled out
// via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__identity_fc32_uint32
(
    GxB_FC32_t *Cx,         // output array; Cx and Ax may be aliased
    const uint32_t *Ax,     // input array with anz entries
    int64_t anz,            // number of entries to convert
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Phase 2 of the typecasting transpose: the actual loop body lives in the
// shared template GB_unop_transpose.c, specialized by the GB_* macros
// defined above.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_unop_tran__identity_fc32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: atomic_messages.c ===== */
// NOTE(review): clang -verify test for '#pragma omp atomic' diagnostics.
// The expected-error/expected-note comments below are load-bearing test
// oracles; their relative line offsets (@+1/@+2) must not be disturbed, so
// review comments are only added at file head and between functions.

// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp -fopenmp-version=50 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp-simd -fopenmp-version=50 -ferror-limit 100 %s -Wuninitialized

// -Wuninitialized still fires through an atomic read.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp atomic read
  argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// Labels and gotos may not cross into/out of the atomic structured block.
int foo() {
L1:
  foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
    goto L1; // expected-error {{use of undeclared label 'L1'}}
  }
  goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
  L2:
    foo();
  }

  return 0;
}

struct S {
  int a;
};

// Valid and invalid forms of 'atomic read' with scalar operands.
int readint() {
  int a = 0, b = 0;
// Test for atomic read
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected lvalue expression}}
  a = 0;
#pragma omp atomic read
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
  a = b;

  return 0;
}

// 'atomic read' rejects non-scalar (struct) operands.
int readS() {
  struct S a, b;
  // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}}
#pragma omp atomic read read allocate(a)
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}

// Valid and invalid forms of 'atomic write' with scalar operands.
int writeint() {
  int a = 0, b = 0;
// Test for atomic write
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic write
  a = 0;
#pragma omp atomic write
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  a = b;

  return 0;
}

// 'atomic write' rejects non-scalar (struct) operands.
int writeS() {
  struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}

// Valid and invalid forms of 'atomic update' (and bare 'atomic').
int updateint() {
  int a = 0, b = 0;
// Test for atomic update
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected built-in binary or unary operator}}
  foo();
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected built-in binary operator}}
  a = b;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = b || a;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = a && b;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = (float)a + b;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = 2 * b;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = b + *&a;
#pragma omp atomic update
  *&a = *&a + 2;
#pragma omp atomic update
  a++;
#pragma omp atomic
  ++a;
#pragma omp atomic update
  a--;
#pragma omp atomic
  --a;
#pragma omp atomic update
  a += b;
#pragma omp atomic
  a %= b;
#pragma omp atomic update
  a *= b;
#pragma omp atomic
  a -= b;
#pragma omp atomic update
  a /= b;
#pragma omp atomic
  a &= b;
#pragma omp atomic update
  a ^= b;
#pragma omp atomic
  a |= b;
#pragma omp atomic update
  a <<= b;
#pragma omp atomic
  a >>= b;
#pragma omp atomic update
  a = b + a;
#pragma omp atomic
  a = a * b;
#pragma omp atomic update
  a = b - a;
#pragma omp atomic
  a = a / b;
#pragma omp atomic update
  a = b & a;
#pragma omp atomic
  a = a ^ b;
#pragma omp atomic update
  a = b | a;
#pragma omp atomic
  a = a << b;
#pragma omp atomic
  a = b >> a;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}}
#pragma omp atomic update update
  a /= b;

  return 0;
}

// Valid and invalid forms of 'atomic capture' (expression and compound).
int captureint() {
  int a = 0, b = 0, c = 0;
// Test for atomic capture
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected compound statement}}
  ;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  foo();
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected built-in binary or unary operator}}
  a = b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = b || a;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  b = a = a && b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = (float)a + b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = 2 * b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = b + *&a;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected exactly two expression statements}}
  { a = b; }
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected exactly two expression statements}}
  {}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b;a = b;}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b; a = b || a;}
#pragma omp atomic capture
  {b = a; a = a && b;}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = (float)a + b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = 2 * b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = b + *&a;
#pragma omp atomic capture
  c = *&a = *&a + 2;
#pragma omp atomic capture
  c = a++;
#pragma omp atomic capture
  c = ++a;
#pragma omp atomic capture
  c = a--;
#pragma omp atomic capture
  c = --a;
#pragma omp atomic capture
  c = a += b;
#pragma omp atomic capture
  c = a %= b;
#pragma omp atomic capture
  c = a *= b;
#pragma omp atomic capture
  c = a -= b;
#pragma omp atomic capture
  c = a /= b;
#pragma omp atomic capture
  c = a &= b;
#pragma omp atomic capture
  c = a ^= b;
#pragma omp atomic capture
  c = a |= b;
#pragma omp atomic capture
  c = a <<= b;
#pragma omp atomic capture
  c = a >>= b;
#pragma omp atomic capture
  c = a = b + a;
#pragma omp atomic capture
  c = a = a * b;
#pragma omp atomic capture
  c = a = b - a;
#pragma omp atomic capture
  c = a = a / b;
#pragma omp atomic capture
  c = a = b & a;
#pragma omp atomic capture
  c = a = a ^ b;
#pragma omp atomic capture
  c = a = b | a;
#pragma omp atomic capture
  c = a = a << b;
#pragma omp atomic capture
  c = a = b >> a;
#pragma omp atomic capture
  { c = *&a; *&a = *&a + 2;}
#pragma omp atomic capture
  { *&a = *&a + 2; c = *&a;}
#pragma omp atomic capture
  {c = a; a++;}
#pragma omp atomic capture
  {c = a; (a)++;}
#pragma omp atomic capture
  {++a;c = a;}
#pragma omp atomic capture
  {c = a;a--;}
#pragma omp atomic capture
  {--a;c = a;}
#pragma omp atomic capture
  {c = a; a += b;}
#pragma omp atomic capture
  {c = a; (a) += b;}
#pragma omp atomic capture
  {a %= b; c = a;}
#pragma omp atomic capture
  {c = a; a *= b;}
#pragma omp atomic capture
  {a -= b;c = a;}
#pragma omp atomic capture
  {c = a; a /= b;}
#pragma omp atomic capture
  {a &= b; c = a;}
#pragma omp atomic capture
  {c = a; a ^= b;}
#pragma omp atomic capture
  {a |= b; c = a;}
#pragma omp atomic capture
  {c = a; a <<= b;}
#pragma omp atomic capture
  {a >>= b; c = a;}
#pragma omp atomic capture
  {c = a; a = b + a;}
#pragma omp atomic capture
  {a = a * b; c = a;}
#pragma omp atomic capture
  {c = a; a = b - a;}
#pragma omp atomic capture
  {a = a / b; c = a;}
#pragma omp atomic capture
  {c = a; a = b & a;}
#pragma omp atomic capture
  {a = a ^ b; c = a;}
#pragma omp atomic capture
  {c = a; a = b | a;}
#pragma omp atomic capture
  {a = a << b; c = a;}
#pragma omp atomic capture
  {c = a; a = b >> a;}
#pragma omp atomic capture
  {c = a; a = foo();}
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}}
#pragma omp atomic capture capture
  b = a /= b;

  return 0;
}

// The 'hint' clause: rejected entirely under OpenMP 4.5 (omp45-error);
// under 5.0 the argument must be an integer constant expression.
void hint() {
  int a = 0;
#pragma omp atomic hint // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected '(' after 'hint'}}
  a += 1;
#pragma omp atomic hint( // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(+ // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(a // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expression is not an integer constant expression}}
  a += 1;
#pragma omp atomic hint(a) // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} omp50-error {{expression is not an integer constant expression}}
  a += 1;
#pragma omp atomic hint(1) hint(1) // omp45-error 2 {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{directive '#pragma omp atomic' cannot contain more than one 'hint' clause}}
  a += 1;
}
/* ===== file: openmp-ex07.c ===== */
/*
 * OpenMP example: the `private` clause.
 *
 * Queries the team size and thread id three times: once serially, once
 * inside a parallel region with private copies, and once more serially to
 * show that writes to the private copies never reach the originals.
 */
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

int main(void)
{
    int nthreads;
    int tid;

    /* Outside any parallel region there is a team of one, and we are it. */
    nthreads = omp_get_num_threads();
    tid = omp_get_thread_num();
    printf("\"You're all individuals!\" said %d of %d.\n", tid, nthreads);

#pragma omp parallel private(nthreads,tid)
    {
        /* Each thread refills its own private copies of both variables. */
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
        sleep(1);
        printf("\"Yes, we're all individuals!\" replied %d of %d, sleepily.\n",
               tid, nthreads);
    }

    /* But then what happens when we try to use the original variable again: do
     * any of the private writes affect it?  No: the outer variables still hold
     * the values assigned before the parallel region. */
    printf("\"I'm not,\" said %d of %d.\n", tid, nthreads);
    return 0;
}
GB_binop__minus_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_01__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_02__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_03__minus_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int32) // A*D function (colscale): GB (_AxD__minus_int32) // D*A function (rowscale): GB (_DxB__minus_int32) // C+=B function (dense accum): GB (_Cdense_accumB__minus_int32) // C+=b function (dense accum): GB (_Cdense_accumb__minus_int32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int32) // C=scalar+B GB (_bind1st__minus_int32) // C=scalar+B' GB (_bind1st_tran__minus_int32) // C=A+scalar GB (_bind2nd__minus_int32) // C=A'+scalar GB (_bind2nd_tran__minus_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // 
true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__minus_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__minus_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. 
%
%    o target: the RGB value of the target color.
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
  /*
    Scan-line flood fill.  A fixed-capacity stack of horizontal segments is
    processed until empty; visited pixels are recorded by making the
    corresponding pixel of a scratch "floodplane" image transparent.  A
    second pass then tiles the fill color onto the original image wherever
    the floodplane was marked.
  */
#define MaxStacksize  524288UL
  /*
    Push the segment [left,right] of row `up`, tagged with the direction
    `delta` (+1/-1) toward the row it should propagate to.  The push is
    silently dropped when the target row up+delta lies outside the image;
    exceeding MaxStacksize entries raises SegmentStackOverflow.  The
    SegmentInfo fields are repurposed: x1=left, y1=row, x2=right,
    y2=direction.
  */
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  MemoryInfo
    *segment_info;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if ((image->matte == MagickFalse) &&
      (draw_info->fill.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The floodplane starts fully opaque; pixels are
    marked "filled" by making them transparent.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack: the seed pixel, once for each vertical
    direction.
  */
  exception=(&image->exception);
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: scan left from x1 while pixels match the
      target, marking them transparent in the floodplane.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /*
        Transparent floodplane pixel == already visited.
      */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /*
      skip is MagickTrue when not even column x1 matched, i.e. the leftward
      scan recolored nothing.
    */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Extend the matched run rightward from x, then push the run for
            the next row (offset) and, past x2, a turn-back run (-offset).
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) ==
                  MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      /*
        Skip over the non-matching gap to find the next matching run inside
        [x,x2], if any.
      */
      if (x <= x2)
        {
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane: wherever the floodplane was made
      transparent, composite the (possibly per-pixel) fill color into the
      requested channels of the original image.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill.blue));
          if (((channel & OpacityChannel) != 0) ||
              (draw_info->fill.opacity != OpaqueOpacity))
            SetPixelOpacity(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  /*
    MagickTrue only if the tile pass completed every row without a cache
    failure.
  */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G r a d i e n t I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies a continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelPacket *start_color,
%        const PixelPacket *stop_color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread method: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%  This provides a good example of making use of the DrawGradientImage
%  function and the gradient structure in draw_info.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Set gradient start-stop end points.  Defaults may be overridden by the
    "gradient:*" image artifacts parsed below.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      /*
        Each gravity maps to a gradient vector running toward that compass
        point; x components span the width, y components span the height.
      */
      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Bug fix: a top-to-bottom (South) vector must span the image
            height; this previously used image->columns-1 (the width).
          */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=(MagickRealType) StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    When no orientation artifact is given, a linear gradient defaults to
    top-to-bottom (zero the x component of the vector).
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1)*cosine)+
        fabs((double) (image->rows-1)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1),(image->rows-1))/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) (MagickMax((image->columns-1),
            (image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1)*
            (image->columns-1)+(image->rows-1)*(image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1)/2.0;
          gradient->radii.y=(double) (image->rows-1)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) MagickMin((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /*
        Bug fix: release draw_info before throwing; previously it leaked on
        this error path.
      */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.
Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads, sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count, sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **magick_restrict histograms, width; ssize_t y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,0.5); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict paint_indexes; register ssize_t x; register PixelPacket *magick_restrict q; register size_t *histogram; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view); histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, v; /* Assign most frequent color. 
*/ i=0; j=0; count=0; (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+u+i))); histogram[k]++; if (histogram[k] > count) { j=i+u; count=histogram[k]; } } i+=(ssize_t) (linear_image->columns+width); } *q=(*(p+j)); if (linear_image->colorspace == CMYKColorspace) SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j)); p++; q++; } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
*/

/*
  Convenience wrapper: paint across all composite channels.
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}

MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* Conform target and fill to the image depth/colorspace before comparing. */
  exception=(&image->exception);
  ConformMagickPixelPacket(image,fill,&conform_fill,exception);
  ConformMagickPixelPacket(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* Fuzzy match against the target; invert flips the selection. */
      if (IsMagickColorSimilar(&pixel,&conform_target) != invert)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(conform_fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(conform_fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(conform_fill.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(conform_fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(conform_fill.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity,
%        const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* Ensure the image carries an alpha channel before editing opacities. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* Fuzzy match against the target; invert flips the selection. */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of the color components (RGB) can be
%  different.  Thus this method takes two target pixels (one low and one
%  high), and all the pixels of the image lying between these two pixels are
%  made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* Ensure the image carries an alpha channel before editing opacities. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* Per-channel range test: pixel must lie inside [low,high] on R, G, B. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
edge_data.h
/* ============================================================================== KratosPFEMApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: antonia $ // Date: $Date: 2009-01-14 08:26:51 $ // Revision: $Revision: 1.11 $ // // #if !defined(KRATOS_EDGE_DATA_H_INCLUDED ) #define KRATOS_EDGE_DATA_H_INCLUDED //we suggest defining the following macro #define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION //we suggest defining the following macro #define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" //#include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "incompressible_fluid_application.h" #include "utilities/openmp_utils.h" namespace Kratos { // template<unsigned int TDim> // class EdgeConstructionScratch // { // public: // array_1d<double, TDim+1> N; // boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim> dN_dx; // double volume; // double weighting_factor = 1.0 / static_cast<double>(TDim+1); // boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> mass_consistent; // array_1d<double, TDim+1> mass_lumped; // array_1d<unsigned int, TDim+1> nodal_indices; // array_1d<double, TDim+1> heights; // // } //structure definition for fast access to edge data using CSR format template<unsigned int TDim> class EdgesStructureType { public: //component ij of the consistent mass matrix (M = Ni * Nj * dOmega) double Mass; //components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega) //double Laplacian; boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ; //components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega) array_1d<double, TDim> Ni_DNj; //components k of the transposed gradient matrix of edge ij (GT = dNi/dxl * Nj * dOmega) //TRANSPOSED GRADIENT array_1d<double, TDim> DNi_Nj; 
//*************************************************************************************
//*************************************************************************************

    // Pressure gradient integrated by parts:
    //   RHSi += DNi_Nj pj + Aboundary * pext  ==>  RHS += Ni_DNj p_j - DNi_Nj p_i
    // ATTENTION: the boundary term "+ Aboundary * pext" is NOT included!!
    // It should be added "manually" by the caller.
    inline void Add_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
    }

    // Same contribution as Add_Gp() but with opposite sign.
    inline void Sub_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
    }

    //*************************************************************************************
    //*************************************************************************************

    // Divergence operator (difference form): RHSi += Ni_DNj[k] * (v_j[k] - v_i[k])
    inline void Add_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
    }

    inline void Sub_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
    }

    //*************************************************************************************
    //*************************************************************************************

    // Gradient: RHSi[k] += Ni_DNj[k] * (p_j - p_i)
    inline void Add_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] += Ni_DNj[comp] * (p_j - p_i);
    }

    inline void Sub_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] -= Ni_DNj[comp] * (p_j - p_i);
    }

    //*************************************************************************************
    //*************************************************************************************

    // Divergence integrated by parts: RHSi -= Ni_DNj[k]*v_j[k] - DNi_Nj[k]*v_i[k]
    inline void Add_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
    }

    inline void Sub_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
    }

    //*************************************************************************************
    //*************************************************************************************

    // Gets the trace of the laplacian matrix of this edge.
    inline void CalculateScalarLaplacian(double& l_ij)
    {
        l_ij = LaplacianIJ(0, 0);
        for (unsigned int comp = 1; comp < TDim; comp++)
            l_ij += LaplacianIJ(comp, comp);
    }

    // Convective contribution of edge ij to a vector unknown U; the branch
    // taken depends on the USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION macro.
    inline void Add_ConvectiveContribution(array_1d<double, TDim>& destination,
            const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
            const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += temp * (U_j[l_comp] - U_i[l_comp]);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
#endif
    }

    inline void Sub_ConvectiveContribution(array_1d<double, TDim>& destination,
            const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
            const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= temp * (U_j[l_comp] - U_i[l_comp]);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
#endif
    }

    // Convective contribution of edge ij to a scalar unknown phi; the branch
    // taken depends on the USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION macro.
    inline void Sub_ConvectiveContribution(double& destination,
            const array_1d<double, TDim>& a_i, const double& phi_i,
            const array_1d<double, TDim>& a_j, const double& phi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        destination -= temp * (phi_j - phi_i);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        destination -= aux_j * phi_j - aux_i * phi_i;
#endif
    }

    inline void Add_ConvectiveContribution(double& destination,
            const array_1d<double, TDim>& a_i, const double& phi_i,
            const array_1d<double, TDim>& a_j, const double& phi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        destination += temp * (phi_j - phi_i);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        destination += aux_j * phi_j - aux_i * phi_i;
#endif
    }

    //*************************************************************************************
    //*************************************************************************************

    // Low-order convection stabilization for a vector unknown:
    // stab_low = (a_i . LaplacianIJ . a_i) * (U_j - U_i)
    inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim>& stab_low,
            const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
            const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
        double conv_stab = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);
        // NOTE(review): earlier experimental variants (a diagonal-laplacian form
        // and an overload adding the pressure difference
        // "stab_low[j] -= a_i[i] * LaplacianIJ(i,j) * (p_j - p_i)") were kept
        // commented out here in the original source.
    }

    // Low-order convection stabilization for a scalar unknown.
    inline void CalculateConvectionStabilization_LOW(double& stab_low,
            const array_1d<double, TDim>& a_i, const double& phi_i,
            const array_1d<double, TDim>& a_j, const double& phi_j)
    {
        double conv_stab = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
        stab_low = conv_stab * (phi_j - phi_i);
    }

    //*************************************************************************************
    //*************************************************************************************

    // High-order convection stabilization for a vector projection pi.
    inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim>& stab_high,
            const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& pi_i,
            const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& pi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        double temp = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); //check if the minus sign is correct
        // NOTE(review): two alternative sign conventions were kept commented
        // out here in the original source.
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]);
#endif
    }

    // High-order convection stabilization for a scalar projection pi.
    inline void CalculateConvectionStabilization_HIGH(double& stab_high,
            const array_1d<double, TDim>& a_i, const double& pi_i,
            const array_1d<double, TDim>& a_j, const double& pi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        stab_high = -temp * (pi_j - pi_i); //check if the minus sign is correct
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        stab_high = -(aux_j * pi_j - aux_i * pi_i);
#endif
    }

    //*************************************************************************************
    //*************************************************************************************

    // Blend low- and high-order stabilization: destination += tau*(low - beta*high)
    inline void Add_StabContribution(array_1d<double, TDim>& destination,
            const double tau, const double beta,
            const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
    {
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
    }

    inline void Add_StabContribution(double& destination,
            const double tau, const double beta,
            const double& stab_low, const double& stab_high)
    {
        destination += tau * (stab_low - beta * stab_high);
    }

    inline void Sub_StabContribution(array_1d<double, TDim>& destination,
            const double tau, const double beta,
            const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
    {
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= tau * (stab_low[l_comp] - beta *
stab_high[l_comp]);
}

//*************************************************************************************
// Subtracts the stabilization term tau * (stab_low - beta * stab_high) from the
// destination value (scalar mirror of the Add_StabContribution overload above).
inline void Sub_StabContribution(double& destination, const double tau, const double beta, const double& stab_low, const double& stab_high)
{
    destination -= tau * (stab_low - beta * stab_high);
}

//*************************************************************************************
//*************************************************************************************

// Adds the edge viscous contribution nu_i * L * (U_j - U_i) to destination, where L is
// the scalar laplacian, i.e. the trace of the edge matrix LaplacianIJ.
// NOTE(review): nu_j is accepted but unused; the commented-out average below suggests an
// averaged viscosity was once intended - confirm before relying on nu_j here.
inline void Add_ViscousContribution(array_1d<double, TDim>& destination,
        const array_1d<double, TDim>& U_i, const double& nu_i,
        const array_1d<double, TDim>& U_j, const double& nu_j)
{
    //calculate scalar laplacian (trace of LaplacianIJ)
    double L = 0.0;
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        L += LaplacianIJ(l_comp, l_comp);

    //double nu_avg = 0.5*(nu_i+nu_j);

    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]);
}

// Subtracts the same edge viscous contribution (mirror of Add_ViscousContribution;
// nu_j is likewise unused here).
inline void Sub_ViscousContribution(array_1d<double, TDim>& destination,
        const array_1d<double, TDim>& U_i, const double& nu_i,
        const array_1d<double, TDim>& U_j, const double& nu_j)
{
    //calculate scalar laplacian (trace of LaplacianIJ)
    double L = 0.0;
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        L += LaplacianIJ(l_comp, l_comp);

    //double nu_avg = 0.5*(nu_i+nu_j);

    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]);
}

};

//class definition of matrices using CSR format.
// Holds edge-based (CSR tuple) and node-based data assembled from the mesh:
// edge mass/laplacian/gradient contributions, lumped and inverted mass, minimal heights.
template<unsigned int TDim, class TSparseSpace>
class MatrixContainer
{
public:
    //name for the self defined structure stored per nonzero edge ij
    typedef EdgesStructureType<TDim> CSR_Tuple;
    typedef vector<CSR_Tuple> EdgesVectorType;

    //name for row start and column index vectors (CSR bookkeeping)
    typedef vector<unsigned int> IndicesVectorType;

    //names for separately stored node based values
    typedef vector<double> ValuesVectorType;
    // typedef std::vector< array_1d<double,TDim> > CalcVectorType;
    typedef vector< array_1d<double, TDim> > CalcVectorType;

    //constructor and destructor
    MatrixContainer()
    {
    };

    ~MatrixContainer()
    {
    };

    //functions to return private values
    inline unsigned int GetNumberEdges()
    {
        return mNumberEdges;
    }

    inline EdgesVectorType& GetEdgeValues()
    {
        return mNonzeroEdgeValues;
    }

    inline IndicesVectorType& GetColumnIndex()
    {
        return mColumnIndex;
    }

    inline IndicesVectorType& GetRowStartIndex()
    {
        return mRowStartIndex;
    }

    inline ValuesVectorType& GetLumpedMass()
    {
        return mLumpedMassMatrix;
    }

    inline ValuesVectorType& GetInvertedMass()
    {
        return mInvertedMassMatrix;
    }

    inline CalcVectorType& GetDiagGradient()
    {
        return mDiagGradientMatrix;
    }

    inline ValuesVectorType& GetHmin()
    {
        return mHmin;
    }

    //********************************************************
    //function to size and initialize the vector of CSR tuples.
    // Counts edges via the NEIGHBOUR_NODES lists, assigns each node its global
    // AUX_INDEX, and sets up mColumnIndex / mRowStartIndex with zeroed edge data.
    void ConstructCSRVector(ModelPart& model_part)
    {
        KRATOS_TRY

        //SIZE OF CSR VECTOR
        //defining the number of nodes and edges
        int n_nodes = model_part.Nodes().size();
        //remark: no colouring algorithm is used here (symmetry is neglected)
        //        respectively edge ij is considered different from edge ji
        mNumberEdges = 0;
        //counter to assign and get global nodal index
        int i_node = 0;

        //counting the edges connecting the nodes
        for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++)
        {
            //counting neighbours of each node
            mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size();
            //DIAGONAL TERMS
            //mNumberEdges++;

            //assigning global index to each node (stored as double in the nodal database)
            node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double> (i_node++);
        }
        //error message in case number of nodes does not coincide with number of indices
        if (i_node != n_nodes)
            KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!");

        //allocating memory for block of CSR data - setting to zero for first-touch OpenMP allocation
        mNonzeroEdgeValues.resize(mNumberEdges);
        //SetToZero(mNonzeroEdgeValues);
        mColumnIndex.resize(mNumberEdges);
        //SetToZero(mColumnIndex);
        mRowStartIndex.resize(n_nodes + 1);
        //SetToZero(mRowStartIndex);
        mLumpedMassMatrix.resize(n_nodes);
        SetToZero(mLumpedMassMatrix);
        mInvertedMassMatrix.resize(n_nodes);
        SetToZero(mInvertedMassMatrix);
        mDiagGradientMatrix.resize(n_nodes);
        SetToZero(mDiagGradientMatrix);
        mHmin.resize(n_nodes);
        SetToZero(mHmin);

        //INITIALIZING OF THE CSR VECTOR
        //temporary variable as the row start index of a node depends on the number of neighbours of the previous one
        unsigned int row_start_temp = 0;

        //NOTE(review): this loop runs the threads one after another (only thread k works
        //in iteration k), presumably so each thread first-touches its own partition of the
        //CSR arrays while row_start_temp stays sequentially consistent - confirm intent.
        int number_of_threads = OpenMPUtils::GetNumThreads();
        std::vector<int> row_partition(number_of_threads);
        OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition);

        for (int k = 0; k < number_of_threads; k++)
        {
            #pragma omp parallel
            if (OpenMPUtils::ThisThread() == k)
            {
                for (unsigned int aux_i = static_cast<unsigned int> (row_partition[k]); aux_i < static_cast<unsigned int> (row_partition[k + 1]); aux_i++)
                {
                    typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i;

                    //main loop over all nodes
                    // for (typename ModelPart::NodesContainerType::iterator node_it=model_part.NodesBegin(); node_it!=model_part.NodesEnd(); node_it++)
                    // {

                    //getting the global index of the node
                    i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));

                    //determining its neighbours
                    WeakPointerVector< Node < 3 > >& neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES);
                    //number of neighbours of node i determines row start index for the following node
                    unsigned int n_neighbours = neighb_nodes.size();
                    //DIAGONAL TERMS
                    //n_neighbours++;

                    //reserving memory for work array
                    std::vector<unsigned int> work_array;
                    work_array.reserve(n_neighbours);
                    //DIAGONAL TERMS
                    //work_array.push_back(i_node);

                    //nested loop over the neighbouring nodes
                    for (WeakPointerVector< Node < 3 > >::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++)
                    {
                        //getting global index of the neighbouring node
                        work_array.push_back(static_cast<unsigned int> (neighb_it->FastGetSolutionStepValue(AUX_INDEX)));
                    }
                    //reordering neighbours following their global indices
                    std::sort(work_array.begin(), work_array.end());

                    //setting current row start index
                    mRowStartIndex[i_node] = row_start_temp;

                    //nested loop over the by now ordered neighbours
                    for (unsigned int counter = 0; counter < n_neighbours; counter++)
                    {
                        //getting global index of the neighbouring node
                        unsigned int j_neighbour = work_array[counter];
                        //calculating CSR index
                        unsigned int csr_index = mRowStartIndex[i_node] + counter;

                        //saving column index j of the original matrix
                        mColumnIndex[csr_index] = j_neighbour;

                        //initializing the CSR vector entries with zero
                        mNonzeroEdgeValues[csr_index].Mass = 0.0;
                        //mNonzeroEdgeValues[csr_index].Laplacian = 0.0;
                        noalias(mNonzeroEdgeValues[csr_index].LaplacianIJ) = ZeroMatrix(TDim, TDim);
                        noalias(mNonzeroEdgeValues[csr_index].Ni_DNj) = ZeroVector(TDim);
                        //TRANSPOSED GRADIENT
                        noalias(mNonzeroEdgeValues[csr_index].DNi_Nj) = ZeroVector(TDim);
                    }
                    //preparing row start index for next node
                    row_start_temp += n_neighbours;
                }
            }
        }
        //adding last entry (necessary for abort criterion of loops)
        mRowStartIndex[n_nodes] = mNumberEdges;

        //INITIALIZING NODE BASED VALUES
        //lumped mass matrix (elements Mi)
        /* #pragma omp parallel for
        for (int i_node=0; i_node<n_nodes; i_node++)
        mLumpedMassMatrix[i_node] = 0.0;*/

        #pragma omp parallel for //set the heights to a huge number
        for (int i_node = 0; i_node < n_nodes; i_node++)
            mHmin[i_node] = 1e10;

        //diagonal of gradient matrix (elements Gii)
        // #pragma omp parallel for
        // for (int i_node=0; i_node<n_nodes; i_node++)
        // noalias(mDiagGradientMatrix[i_node]) = ZeroVector(TDim);

        KRATOS_CATCH("")
    }

    //*********************************
    //function to precalculate CSR data.
    // Loops over all elements once, accumulating elemental mass, laplacian and gradient
    // contributions into the edge tuples, plus lumped mass / diagonal gradient / minimal
    // height per node; finally computes the inverted lumped mass matrix.
    void BuildCSRData(ModelPart& model_part)
    {
        KRATOS_TRY

        //PRECALCULATING CSR DATA
        //defining temporary local variables for elementwise addition
        //shape functions
        array_1d<double, TDim + 1 > N;
        //shape function derivatives
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim> dN_dx;
        //volume
        double volume;
        //weighting factor
        double weighting_factor = 1.0 / static_cast<double> (TDim + 1);
        //elemental matrices
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim + 1 > mass_consistent;
        //boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> laplacian;
        array_1d<double, TDim + 1 > mass_lumped;
        //global indices of elemental nodes
        array_1d<unsigned int, TDim + 1 > nodal_indices;

        array_1d<double, TDim + 1 > heights;

        //loop over all elements
        for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
        {
            //LOCAL ELEMENTWISE CALCULATIONS
            //getting geometry data of the element
            GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);

            //calculate length of the heights of the element
            //(height_i = 1/|grad N_i|)
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
                for (unsigned int comp = 1; comp < TDim; comp++)
                {
                    heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
                }
                heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
                // KRATOS_WATCH(heights);
            }

            //setting up elemental mass matrices
            CalculateMassMatrix(mass_consistent, volume);
            //lumped mass = row sums of the consistent mass matrix
            noalias(mass_lumped) = ZeroVector(TDim + 1);
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    //mass_consistent(ie_node,je_node) = N(ie_node) * N(je_node) * volume;
                    mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
                }
                //mass_lumped[ie_node] = volume * N[ie_node];
            }

            /*OLD DATA STRUCTURE
            //calculating elemental laplacian matrix
            noalias(laplacian) = ZeroMatrix(TDim+1,TDim+1);
            for (unsigned int ie_node=0; ie_node<=TDim; ie_node++)
            for (unsigned int je_node=ie_node+1; je_node<=TDim; je_node++)
            //componentwise multiplication
            for (unsigned int component=0; component<TDim; component++)
            {
            //taking advantage of symmetry
            double temp = dN_dx(ie_node,component) * dN_dx(je_node,component) * volume;
            laplacian(ie_node,je_node) += temp;
            laplacian(je_node,ie_node) += temp;
            }
            //multiply gradient with volume referring to each gauss point
            dN_dx *= (volume / double(TDim+1));*/

            //(corresponding to Ni * dOmega respectively Nj * dOmega)
            double weighted_volume = volume * weighting_factor;

            //ASSEMBLING GLOBAL DATA STRUCTURE
            //loop over the nodes of the element to determine their global indices
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                nodal_indices[ie_node] = static_cast<unsigned int> (elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));

            //assembling global "edge matrices" by adding local contributions
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                //check the heights and change the value if minimal is found
                if (mHmin[ nodal_indices[ie_node] ] > heights[ie_node])
                    mHmin[ nodal_indices[ie_node] ] = heights[ie_node];

                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    //remark: there is no edge linking node i with itself!
                    //DIAGONAL TERMS
                    if (ie_node != je_node)
                    {
                        //calculating CSR index from global index
                        unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);

                        //assigning precalculated element data to the referring edges
                        //contribution to edge mass
                        mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);

                        //contribution to edge laplacian
                        /*OLD DATA STRUCTURE
                        mNonzeroEdgeValues[csr_index].Laplacian = laplacian(ie_node,je_node);*/
                        boost::numeric::ublas::bounded_matrix <double, TDim, TDim>& laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
                                laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;

                        //contribution to edge gradient
                        array_1d<double, TDim>& gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            //gradient[l_comp] += dN_dx(je_node,l_comp);
                            gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;

                        //TRANSPOSED GRADIENT
                        //contribution to transposed edge gradient
                        array_1d<double, TDim>& transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            //transp_gradient[l_comp] += dN_dx(ie_node,l_comp);
                            transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
                    }
                }
            }

            //assembling node based vectors
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                //diagonal of the global lumped mass matrix
                mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                //diagonal of the global gradient matrix
                array_1d<double, TDim>& gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
                for (unsigned int component = 0; component < TDim; component++)
                    //gradient[component] += dN_dx(ie_node,component);
                    gradient[component] += dN_dx(ie_node, component) * weighted_volume;
            }
        }

        //copy mass matrix to inverted mass matrix
        for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
        }

        //perform MPI synchronization between the domains

        //calculating inverted mass matrix (this requires synchronization for MPI parallelism)
        for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
        }

        KRATOS_CATCH("")
    }

    //******************************************
    //function to calculate CSR index of edge ij.
    // Linear search within row NodeI; if NeighbourJ is not a neighbour, the returned
    // index equals mRowStartIndex[NodeI + 1] (one past the row end) - callers must
    // only query existing edges.
    unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        //index indicating data position of edge ij
        unsigned int csr_index;
        //searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        //returning CSR index of edge ij
        return csr_index;

        KRATOS_CATCH("")
    }

    //***********************************************
    //function to get pointer to CSR tuple of edge ij
    //(same search as GetCSRIndex; only valid for existing edges)
    CSR_Tuple* GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        //index indicating data position of edge ij
        unsigned int csr_index;
        //searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        //returning pointer to CSR tuple of edge ij
        return &mNonzeroEdgeValues[csr_index];

        KRATOS_CATCH("")
    }

    //*******************************
    //function to free dynamic memory
    void Clear()
    {
        KRATOS_TRY

        mNonzeroEdgeValues.clear();
        mColumnIndex.clear();
        mRowStartIndex.clear();
        mInvertedMassMatrix.clear();
        mLumpedMassMatrix.clear();
        mDiagGradientMatrix.clear();
        mHmin.clear();

        KRATOS_CATCH("")
    }

    //****************************
    //functions to access database
    //(note that this is already thought for parallel;
    // for a single processor this could be done in a faster way)

    // Copies the nodal coordinates into rDestination, indexed by position in rNodes.
    void FillCoordinatesFromDatabase(CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();

        #pragma omp parallel for firstprivate(n_nodes, it_begin)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = (*node_it)[component];
        }

        KRATOS_CATCH("");
    }

    //****************************
    //functions to access database
    //(note that this is already thought for parallel;
    // for a single processor this could be done in a faster way)

    // Copies the current step value of a vector variable into rDestination.
    void FillVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested value in vector form
            array_1d<double, 3 > & vector = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);

            //save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = vector[component];
        }

        KRATOS_CATCH("");
    }

    // Copies the previous time step value (history index 1) of a vector variable.
    void FillOldVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested value in vector form
            array_1d<double, 3 > & vector = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);

            //save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = vector[component];
        }

        KRATOS_CATCH("");
    }

    // Copies the current step value of a scalar variable into rDestination.
    void FillScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested scalar value
            double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);

            //save value in the destination vector
            rDestination[i_node] = scalar;
        }

        KRATOS_CATCH("");
    }

    // Copies the previous time step value (history index 1) of a scalar variable.
    void FillOldScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested scalar value
            double& scalar = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);

            //save value in the destination vector
            rDestination[i_node] = scalar;
        }

        KRATOS_CATCH("");
    }

    // Writes the values of rOrigin back into the nodal database (current step).
    void WriteVectorToDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get reference of destination
            array_1d<double, 3 > & vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);

            //save vector in database
            for (unsigned int component = 0; component < TDim; component++)
                vector[component] = (rOrigin[i_node])[component];
        }

        KRATOS_CATCH("");
    }

    // Writes the scalar values of rOrigin back into the nodal database (current step).
    void WriteScalarToDatabase(Variable<double>& rVariable, ValuesVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            int i_node = i;

            //get reference of destination
            double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);

            //save scalar in database
            scalar = rOrigin[i_node];
        }

        KRATOS_CATCH("");
    }

    //*********************************************************************
    //destination = origin1 + value * Minv*origin  (componentwise, per node)
    void Add_Minv_value(
        CalcVectorType& destination,
        const CalcVectorType& origin1,
        const double value,
        const ValuesVectorType& Minv_vec,
        const CalcVectorType& origin
    )
    {
        KRATOS_TRY

        int loop_size = destination.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& dest = destination[i_node];
            const double m_inv = Minv_vec[i_node];
            const array_1d<double, TDim>& origin_vec1 = origin1[i_node];
            const array_1d<double, TDim>& origin_value = origin[i_node];

            double temp = value * m_inv;
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = origin_vec1[comp] + temp * origin_value[comp];
        }

        KRATOS_CATCH("")
    }

    //scalar overload: destination = origin1 + value * Minv*origin
    void Add_Minv_value(
        ValuesVectorType& destination,
        const ValuesVectorType& origin1,
        const double value,
        const ValuesVectorType& Minv_vec,
        const ValuesVectorType& origin
    )
    {
        KRATOS_TRY

        int loop_size = destination.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            double& dest = destination[i_node];
            const double m_inv = Minv_vec[i_node];
            const double& origin_vec1 = origin1[i_node];
            const double& origin_value = origin[i_node];

            double temp = value * m_inv;
            dest = origin_vec1 + temp * origin_value;
        }

        KRATOS_CATCH("")
    }

    //**********************************************************************
    //resizes the vector and zero-initializes it in parallel (OpenMP first touch)
    void AllocateAndSetToZero(CalcVectorType& data_vector, int size)
    {
        data_vector.resize(size);
        int loop_size = size;
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& aaa = data_vector[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                aaa[comp] = 0.0;
        }
    }

    void AllocateAndSetToZero(ValuesVectorType& data_vector, int size)
    {
        data_vector.resize(size);
        int loop_size = size;
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            data_vector[i_node] = 0.0;
            ;
        }
    }

    //**********************************************************************
    //zeroes an already allocated vector in parallel
    void SetToZero(CalcVectorType& data_vector)
    {
        int loop_size = data_vector.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& aaa = data_vector[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                aaa[comp] = 0.0;
        }
    }

    void SetToZero(ValuesVectorType& data_vector)
    {
        int loop_size = data_vector.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            data_vector[i_node] = 0.0;
            ;
        }
    }

    //**********************************************************************
    //parallel elementwise copy origin -> destination (destination must be presized)
    void AssignVectorToVector(const CalcVectorType& origin,
            CalcVectorType& destination
    )
    {
        int loop_size = origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            const array_1d<double, TDim>& orig = origin[i_node];
            array_1d<double, TDim>& dest = destination[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = orig[comp];
        }
    }

    void AssignVectorToVector(const ValuesVectorType& origin,
            ValuesVectorType& destination
    )
    {
        int loop_size = origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            destination[i_node] = origin[i_node];
        }
    }

private:
    //number of edges
    unsigned int mNumberEdges;

    //CSR data vector for storage of the G, L and consistent M components of edge ij
    EdgesVectorType mNonzeroEdgeValues;

    //vector to store column indices of nonzero matrix elements for each row
    IndicesVectorType mColumnIndex;

    //index vector to access the start of matrix row i in the column vector
    IndicesVectorType mRowStartIndex;

    //inverse of the mass matrix ... for parallel calculation each subdomain should contain this correctly calculated (including contributions of the neighbours)
    ValuesVectorType mInvertedMassMatrix;

    //minimum height around one node
    ValuesVectorType mHmin;

    //lumped mass matrix (separately stored due to lack of diagonal elements of the consistent mass matrix)
    ValuesVectorType mLumpedMassMatrix;

    //diagonal of the gradient matrix (separately stored due to special calculations)
    CalcVectorType mDiagGradientMatrix;

    //*******************************************
    //functions to set up elemental mass matrices
    //2D (triangle) overload: consistent mass with 1/6 on the diagonal, 1/12 off-diagonal
    void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3 > & mass_consistent, double volume)
    {
        for (unsigned int i_node = 0; i_node <= TDim; i_node++)
        {
            //diagonal terms
            mass_consistent(i_node, i_node) = 0.16666666666666666667 * volume; //1/6
            //non-diagonal terms
            double temp = 0.08333333333333333333 * volume; // 1/12
            for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
            {
                //taking advantage of symmetry
                mass_consistent(i_node, j_neighbour) = temp;
                mass_consistent(j_neighbour, i_node) = temp;
            }
        }
    }

    //3D (tetrahedron) overload: 1/10 on the diagonal, 1/20 off-diagonal
    void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4 > & mass_consistent, double volume)
    {
        for (unsigned int i_node = 0; i_node <= TDim; i_node++)
        {
            //diagonal terms
            mass_consistent(i_node, i_node) = 0.1 * volume;
            //non-diagonal terms
            double temp = 0.05 * volume;
            for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
            {
                //taking advantage of symmetry
                mass_consistent(i_node, j_neighbour) = temp;
                mass_consistent(j_neighbour, i_node) = temp;
            }
        }
    }
};
} //namespace Kratos

#endif //KRATOS_EDGE_DATA_H_INCLUDED defined
ConverterOSG.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
#pragma once

#include <osg/CullFace>
#include <osg/Geode>
#include <osg/Hint>
#include <osg/LineWidth>
#include <osg/Material>
#include <osg/Point>
#include <osgUtil/Tessellator>

#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h>
#include <ifcpp/IFC4/include/IfcProject.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include <ifcpp/geometry/GeometrySettings.h>
#include <ifcpp/geometry/SceneGraphUtils.h>
#include <ifcpp/geometry/AppearanceData.h>
#include "GeometryInputData.h"
#include "IncludeCarveHeaders.h"
#include "CSG_Adapter.h"

// Converts carve meshes produced from IFC geometry into an OpenSceneGraph scenegraph.
class ConverterOSG : public StatusCallback
{
protected:
	shared_ptr<GeometrySettings>					m_geom_settings;
	// IfcProduct entity id -> scenegraph switch
	std::map<int, osg::ref_ptr<osg::Switch> >		m_map_entity_id_to_switch;
	// representation id -> scenegraph switch
	std::map<int, osg::ref_ptr<osg::Switch> >		m_map_representation_id_to_switch;
	double											m_recent_progress;
	osg::ref_ptr<osg::CullFace>						m_cull_back_off;
	// shared state set for transparent (glass-like) geometry
	osg::ref_ptr<osg::StateSet>						m_glass_stateset;
	//\brief StateSet caching and re-use
	std::vector<osg::ref_ptr<osg::StateSet> >		m_vec_existing_statesets;
	bool m_enable_stateset_caching = false;

#ifdef ENABLE_OPENMP
	Mutex m_writelock_appearance_cache;
#endif

public:
	ConverterOSG( shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings(geom_settings)
	{
		m_cull_back_off = new osg::CullFace( osg::CullFace::BACK );
		m_glass_stateset = new osg::StateSet();
		m_glass_stateset->setMode( GL_BLEND, osg::StateAttribute::ON );
		m_glass_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN );
	}
	virtual ~ConverterOSG() {}

	// Map: IfcProduct ID -> scenegraph switch
	std::map<int, osg::ref_ptr<osg::Switch> >& getMapEntityIdToSwitch() { return m_map_entity_id_to_switch; }
	// Map: Representation Identifier -> scenegraph switch
	std::map<int, osg::ref_ptr<osg::Switch> >& getMapRepresentationToSwitch() { return m_map_representation_id_to_switch; }

	// Drops all cached switches and statesets (e.g. before converting a new model).
	void clearInputCache()
	{
		m_map_entity_id_to_switch.clear();
		m_map_representation_id_to_switch.clear();
		m_vec_existing_statesets.clear();
	}

	// Appends the 12 edges of the axis-aligned bounding box to the given geometry
	// as an unlit red GL_LINE_STRIP (vertices are appended after any existing ones).
	static void drawBoundingBox( const carve::geom::aabb<3>& aabb, osg::Geometry* geom )
	{
		osg::ref_ptr<osg::Vec3Array> vertices = dynamic_cast<osg::Vec3Array*>( geom->getVertexArray() );
		if( !vertices )
		{
			vertices = new osg::Vec3Array();
			geom->setVertexArray( vertices );
		}
		const carve::geom::vector<3>& aabb_pos = aabb.pos;
		const carve::geom::vector<3>& extent = aabb.extent;
		const double dex = extent.x;
		const double dey = extent.y;
		const double dez = extent.z;

		//8 box corners, lower face first (z - dez), then upper face (z + dez)
		const int vert_id_offset = vertices->size();
		vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z - dez ) );
		vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z - dez ) );
		vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z - dez ) );
		vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z - dez ) );
		vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z + dez ) );
		vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z + dez ) );
		vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z + dez ) );
		vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z + dez ) );

		osg::ref_ptr<osg::DrawElementsUInt> box_lines = new osg::DrawElementsUInt( GL_LINE_STRIP, 0 );
		box_lines->push_back( vert_id_offset + 0 );
		box_lines->push_back( vert_id_offset + 1 );
		box_lines->push_back( vert_id_offset + 2 );
		box_lines->push_back( vert_id_offset + 3 );
		box_lines->push_back( vert_id_offset + 0 );
		box_lines->push_back( vert_id_offset + 4 );
		box_lines->push_back( vert_id_offset + 5 );
		box_lines->push_back( vert_id_offset + 1 );
		box_lines->push_back( vert_id_offset + 5 );
		box_lines->push_back( vert_id_offset + 6 );
		box_lines->push_back( vert_id_offset + 2 );
		box_lines->push_back( vert_id_offset + 6 );
		box_lines->push_back( vert_id_offset + 7 );
		box_lines->push_back( vert_id_offset + 3 );
		box_lines->push_back( vert_id_offset + 7 );
		box_lines->push_back( vert_id_offset + 4 );
		geom->addPrimitiveSet( box_lines );

		osg::ref_ptr<osg::Material> mat = new osg::Material();
		if( !mat ) { throw OutOfMemoryException(); }
		osg::Vec4f ambientColor( 1.f, 0.2f, 0.1f, 1.f );
		mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor );
		mat->setDiffuse( osg::Material::FRONT_AND_BACK, ambientColor );
		mat->setSpecular( osg::Material::FRONT_AND_BACK, ambientColor );
		//mat->setShininess( osg::Material::FRONT_AND_BACK, shininess );
		//mat->setColorMode( osg::Material::SPECULAR );

		osg::StateSet* stateset = geom->getOrCreateStateSet();
		if( !stateset ) { throw OutOfMemoryException(); }
		stateset->setAttribute( mat, osg::StateAttribute::ON );
		stateset->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
	}

	// Renders a single carve face with more than 4 vertices as a tessellated POLYGON
	// primitive on the geode. Vertices are emitted in reverse order (winding flip).
	static void drawFace( const carve::mesh::Face<3>* face, osg::Geode* geode, bool add_color_array = false )
	{
#ifdef _DEBUG
		std::cout << "not triangulated" << std::endl;
#endif
		std::vector<vec3> face_vertices;
		face_vertices.resize( face->nVertices() );
		carve::mesh::Edge<3> *e = face->edge;
		const size_t num_vertices = face->nVertices();
		//walk the face's edge loop to collect vertex positions
		for( size_t i = 0; i < num_vertices; ++i )
		{
			face_vertices[i] = e->v1()->v;
			e = e->next;
		}

		if( num_vertices < 4 )
		{
			//warning only - processing continues regardless
			std::cout << "drawFace is meant only for num vertices > 4" << std::endl;
		}

		vec3* vertex_vec;
		osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array( num_vertices );
		if( !vertices ) { throw OutOfMemoryException(); }
		osg::ref_ptr<osg::DrawElementsUInt> triangles = new osg::DrawElementsUInt( osg::PrimitiveSet::POLYGON, num_vertices );
		if( !triangles ) { throw OutOfMemoryException(); }

		//copy vertices in reversed order
		for( size_t i = 0; i < num_vertices; ++i )
		{
			vertex_vec = &face_vertices[num_vertices - i - 1];
			( *vertices )[i].set( vertex_vec->x, vertex_vec->y, vertex_vec->z );
			( *triangles )[i] = i;
		}

		//flat shading: same polygon normal replicated per vertex
		osg::Vec3f poly_normal = SceneGraphUtils::computePolygonNormal( vertices );
		osg::ref_ptr<osg::Vec3Array> normals = new osg::Vec3Array();
		normals->resize( num_vertices, poly_normal );

		osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
		geometry->setVertexArray( vertices );
		geometry->setNormalArray( normals );
		normals->setBinding( osg::Array::BIND_PER_VERTEX );
		geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POLYGON, 0, vertices->size() ) );

		if( add_color_array )
		{
			osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array();
			colors->resize( vertices->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) );
			colors->setBinding( osg::Array::BIND_PER_VERTEX );
			geometry->setColorArray( colors );
		}

		if( num_vertices > 4 )
		{
			// TODO: check if polygon is convex with Gift wrapping algorithm
			osg::ref_ptr<osgUtil::Tessellator> tesselator = new osgUtil::Tessellator();
			tesselator->setTessellationType( osgUtil::Tessellator::TESS_TYPE_POLYGONS );
			//tesselator->setWindingType( osgUtil::Tessellator::TESS_WINDING_ODD );
			tesselator->retessellatePolygons( *geometry );
		}

		geode->addDrawable( geometry );

#ifdef DEBUG_DRAW_NORMALS
		osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array();
		for( size_t i = 0; i < num_vertices; ++i )
		{
			vertex_vec = &face_vertices[num_vertices - i - 1];
			vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) );
			vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) + poly_normal );
		}
		osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array();
		colors_normals->resize( num_vertices * 2, osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) );
		osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry();
		geometry_normals->setVertexArray( vertices_normals );
		geometry_normals->setColorArray( colors_normals );
		geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX );
		geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
		geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF );
		geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) );
		geode->addDrawable( geometry_normals );
#endif
	}

	//#define DEBUG_DRAW_NORMALS

	// Converts a whole carve meshset into OSG drawables on the geode. Faces with more
	// than 4 vertices are delegated to drawFace; with crease_angle > 0, per-face areas
	// are precomputed (presumably for normal smoothing across edges - the remainder of
	// this function lies beyond this chunk).
	static void drawMeshSet( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* geode, double crease_angle = M_PI*0.05, bool add_color_array = false )
	{
		if( !meshset )
		{
			return;
		}

		osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array();
		if( !vertices_tri ) { throw OutOfMemoryException(); }
		osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array();
		if( !normals_tri ) { throw OutOfMemoryException(); }
		osg::ref_ptr<osg::Vec3Array> vertices_quad;
		osg::ref_ptr<osg::Vec3Array> normals_quad;

		const size_t max_num_faces_per_vertex = 10000;
		std::map<carve::mesh::Face<3>*, double> map_face_area;
		std::map<carve::mesh::Face<3>*, double>::iterator it_face_area;

		if( crease_angle > 0 )
		{
			for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh )
			{
				const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh];
				const size_t num_faces = mesh->faces.size();
				for( size_t i_face = 0; i_face != num_faces; ++i_face )
				{
					carve::mesh::Face<3>* face = mesh->faces[i_face];
					// compute area of projected face:
					std::vector<vec2> projected;
					face->getProjectedVertices( projected );
					double face_area = carve::geom2d::signedArea( projected );
					map_face_area[face] = abs( face_area );
				}
			}
		}

		for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh )
		{
			const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh];
			const size_t num_faces = mesh->faces.size();
			for( size_t i_face = 0; i_face != num_faces; ++i_face )
			{
				carve::mesh::Face<3>* face = mesh->faces[i_face];
				const size_t n_vertices = face->nVertices();
				if( n_vertices > 4 )
				{
					drawFace( face, geode );
					continue;
				}
				const vec3 face_normal = face->plane.N;

				if( crease_angle > 0 )
				{
					carve::mesh::Edge<3>* e = face->edge;
					for( size_t jj = 0; jj <
n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; vec3 intermediate_normal; // collect all faces at vertex // | ^ // | | // f1 e->rev | | e face // v | // <---e1------- <--------------- //-------------> ---------------> // | ^ // | | // v | carve::mesh::Edge<3>* e1 = e;// ->rev->next; carve::mesh::Face<3>* f1 = e1->face; #ifdef _DEBUG if( f1 != face ) { std::cout << "f1 != face" << std::endl; } #endif for( size_t i3 = 0; i3 < max_num_faces_per_vertex; ++i3 ) { if( !e1->rev ) { break; } if( !e1->rev->next ) { break; } vec3 f1_normal = f1->plane.N; const double cos_angle = dot( f1_normal, face_normal ); if( cos_angle > 0 ) { const double deviation = std::abs( cos_angle - 1.0 ); if( deviation < crease_angle ) { double weight = 0.0; it_face_area = map_face_area.find( f1 ); if( it_face_area != map_face_area.end() ) { weight = it_face_area->second; } intermediate_normal += weight*f1_normal; } } if( !e1->rev ) { // it's an open mesh break; } e1 = e1->rev->next; if( !e1 ) { break; } f1 = e1->face; #ifdef _DEBUG if( e1->vert != vertex ) { std::cout << "e1->vert != vertex" << std::endl; } #endif if( f1 == face ) { break; } } const double intermediate_normal_length = intermediate_normal.length(); if( intermediate_normal_length < 0.0000000001 ) { intermediate_normal = face_normal; } else { // normalize: intermediate_normal *= 1.0 / intermediate_normal_length; } const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) ); } e = e->next; } } else { 
carve::mesh::Edge<3>* e = face->edge; for( size_t jj = 0; jj < n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } e = e->next; } } } } if( vertices_tri->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices_tri ); geometry->setNormalArray( normals_tri ); normals_tri->setBinding( osg::Array::BIND_PER_VERTEX ); if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); if( !colors ) { throw OutOfMemoryException(); } colors->resize( vertices_tri->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) ); if( !geometry ) { throw OutOfMemoryException(); } geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < vertices_tri->size(); ++i ) { osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i]; osg::Vec3f& normal_vec = normals_tri->at( i ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( 
			vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) );
			osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry();
			geometry_normals->setVertexArray( vertices_normals );
			geometry_normals->setColorArray( colors_normals );
			geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX );
			geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
			geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF );
			geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) );
			geode->addDrawable( geometry_normals );
#endif
	}

	// Emit one QUADS geometry for all 4-sided faces collected above (only allocated if any were found)
	if( vertices_quad )
	{
		if( vertices_quad->size() > 0 )
		{
			osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
			if( !geometry ) { throw OutOfMemoryException(); }
			geometry->setVertexArray( vertices_quad );
			if( normals_quad )
			{
				normals_quad->setBinding( osg::Array::BIND_PER_VERTEX );
				geometry->setNormalArray( normals_quad );
			}

			if( add_color_array )
			{
				osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array();
				if( !colors ) { throw OutOfMemoryException(); }
				colors->resize( vertices_quad->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) );
				colors->setBinding( osg::Array::BIND_PER_VERTEX );
				geometry->setColorArray( colors );
			}
			geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) );
			// NOTE(review): this null check is redundant — geometry was already checked right after allocation above
			if( !geometry ) { throw OutOfMemoryException(); }
			geode->addDrawable( geometry );
		}
	}
}

//\brief Converts a carve PolylineSetData into a single LINE_STRIP geometry and attaches it to the given geode.
// Polylines with fewer than 2 vertices are skipped. Optionally adds a per-vertex grey color array.
static void drawPolyline( const carve::input::PolylineSetData* polyline_data, osg::Geode* geode, bool add_color_array = false )
{
	osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array();
	if( !vertices ) { throw OutOfMemoryException(); }
	carve::line::PolylineSet* polyline_set = polyline_data->create( carve::input::opts() );

	// need at least two vertices to form a line
	if( polyline_set->vertices.size() < 2 )
	{
#ifdef _DEBUG
		std::cout << __FUNC__ << ": polyline_set->vertices.size() < 2" << std::endl;
#endif
		return;
	}

	for( auto it = polyline_set->lines.begin(); it != polyline_set->lines.end(); ++it )
	{
		const carve::line::Polyline* pline = *it;
		size_t vertex_count = pline->vertexCount();

		for( size_t vertex_i = 0; vertex_i < vertex_count; ++vertex_i )
		{
			// guard against indices beyond the shared vertex pool
			if( vertex_i >= polyline_set->vertices.size() )
			{
#ifdef _DEBUG
				std::cout << __FUNC__ << ": vertex_i >= polyline_set->vertices.size()" << std::endl;
#endif
				continue;
			}
			const carve::line::Vertex* v = pline->vertex( vertex_i );
			vertices->push_back( osg::Vec3d( v->v[0], v->v[1], v->v[2] ) );
		}
	}

	osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
	if( !geometry ) { throw OutOfMemoryException(); }
	geometry->setVertexArray( vertices );
	// NOTE(review): all polylines end up in one LINE_STRIP, so separate polylines get visually connected — presumably acceptable here; verify if distinct strips are needed
	geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINE_STRIP, 0, vertices->size() ) );

	if( add_color_array )
	{
		osg::Vec4f color( 0.6f, 0.6f, 0.6f, 0.1f );
		osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array( vertices->size(), &color );
		if( !colors ) { throw OutOfMemoryException(); }
		colors->setBinding( osg::Array::BIND_PER_VERTEX );
		geometry->setColorArray( colors );
	}

	geode->addDrawable( geometry );
}

//\brief Collects the closed edges of a meshset whose two adjacent faces form a "crease",
// i.e. whose normals deviate by more than crease_angle.
// Note: crease_angle is compared against |cos(angle) - 1|, so it is a cosine-deviation
// threshold rather than an angle in radians — keep callers consistent with this convention.
void computeCreaseEdgesFromMeshset( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, std::vector<carve::mesh::Edge<3>* >& vec_edges_out, const double crease_angle )
{
	if( !meshset ) { return; }

	for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh )
	{
		const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh];
		const std::vector<carve::mesh::Edge<3>* >& vec_closed_edges = mesh->closed_edges;

		for( size_t i_edge = 0; i_edge < vec_closed_edges.size(); ++i_edge )
		{
			carve::mesh::Edge<3>* edge = vec_closed_edges[i_edge];
			if( !edge ) { continue; }
			carve::mesh::Edge<3>* edge_reverse = edge->rev;
			// open edge (no reverse) -> cannot compare two faces, skip
			if( !edge_reverse ) { continue; }
			carve::mesh::Face<3>* face = edge->face;
			carve::mesh::Face<3>* face_reverse = edge_reverse->face;

			const carve::geom::vector<3>& f1_normal = face->plane.N;
			const carve::geom::vector<3>& f2_normal = face_reverse->plane.N;
			const double cos_angle = dot( f1_normal, f2_normal );
			if( cos_angle > 0 )
			{
				// nearly parallel normals -> coplanar faces, not a crease
				const double deviation = std::abs( cos_angle - 1.0 );
				if( deviation < crease_angle )
				{
					continue;
				}
			}
			// TODO: if area of face and face_reverse is equal, skip the crease edge. It could be the inside or outside of a cylinder. Check also if > 2 faces in a row have same normal angle differences
			vec_edges_out.push_back( edge );
		}
	}
}

//\brief Renders the crease edges of a meshset as thick, smoothed, unlit lines on target_geode.
// Delegates edge detection to computeCreaseEdgesFromMeshset (see its note on the crease_angle convention).
void renderMeshsetCreaseEdges( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* target_geode, const double crease_angle )
{
	if( !meshset ) { return; }
	if( !target_geode ) { return; }
	std::vector<carve::mesh::Edge<3>* > vec_crease_edges;
	computeCreaseEdgesFromMeshset( meshset, vec_crease_edges, crease_angle );

	if( vec_crease_edges.size() > 0 )
	{
		// two vertices per crease edge -> GL_LINES
		osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array();
		for( size_t i_edge = 0; i_edge < vec_crease_edges.size(); ++i_edge )
		{
			const carve::mesh::Edge<3>* edge = vec_crease_edges[i_edge];
			const carve::geom::vector<3>& vertex1 = edge->v1()->v;
			const carve::geom::vector<3>& vertex2 = edge->v2()->v;
			vertices->push_back( osg::Vec3d( vertex1.x, vertex1.y, vertex1.z ) );
			vertices->push_back( osg::Vec3d( vertex2.x, vertex2.y, vertex2.z ) );
		}

		osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
		geometry->setVertexArray( vertices );
		geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices->size() ) );
		// unlit, blended, smoothed 3px lines, drawn in a late render bin so they appear on top
		geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
		geometry->getOrCreateStateSet()->setMode( GL_BLEND, osg::StateAttribute::ON );
		geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::LineWidth( 3.0f ), osg::StateAttribute::ON );
		geometry->getOrCreateStateSet()->setMode( GL_LINE_SMOOTH, osg::StateAttribute::ON );
		geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::Hint( GL_LINE_SMOOTH_HINT, GL_NICEST ), osg::StateAttribute::ON );
		geometry->getOrCreateStateSet()->setRenderBinDetails( 10, "RenderBin");
		target_geode->addDrawable( geometry );
	}
}

void applyAppearancesToGroup( const
std::vector<shared_ptr<AppearanceData> >& vec_product_appearances, osg::Group* grp )
{
	// Applies all surface-type appearances to the group's stateset; curve-type appearances
	// are currently ignored (empty branch below is an intentional placeholder).
	for( size_t ii = 0; ii < vec_product_appearances.size(); ++ii )
	{
		const shared_ptr<AppearanceData>& appearance = vec_product_appearances[ii];
		if( !appearance ) { continue; }

		if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_SURFACE || appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY )
		{
			osg::ref_ptr<osg::StateSet> item_stateset;
			convertToOSGStateSet( appearance, item_stateset );
			if( item_stateset )
			{
				osg::StateSet* existing_item_stateset = grp->getStateSet();
				if( existing_item_stateset )
				{
					// merge into the already-assigned stateset instead of replacing it
					if( existing_item_stateset != item_stateset )
					{
						existing_item_stateset->merge( *item_stateset );
					}
				}
				else
				{
					grp->setStateSet( item_stateset );
				}
			}
		}
		else if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_CURVE )
		{
			// TODO: curve appearances not handled yet
		}
	}
}

//\brief Copies a carve 4x4 matrix element-by-element into an osg::Matrixd.
// NOTE(review): this copies m[row][col] in the same order into osg::Matrixd's constructor;
// whether a transpose is needed depends on carve's storage convention vs. OSG's row-vector
// convention — presumably verified against rendered output, confirm before changing.
osg::Matrixd convertMatrixToOSG( const carve::math::Matrix& mat_in )
{
	return osg::Matrixd( mat_in.m[0][0], mat_in.m[0][1], mat_in.m[0][2], mat_in.m[0][3],
		mat_in.m[1][0], mat_in.m[1][1], mat_in.m[1][2], mat_in.m[1][3],
		mat_in.m[2][0], mat_in.m[2][1], mat_in.m[2][2], mat_in.m[2][3],
		mat_in.m[3][0], mat_in.m[3][1], mat_in.m[3][2], mat_in.m[3][3] );
}

//\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object
// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
void convertProductShapeToOSG( shared_ptr<ProductShapeData>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches )
{
	// bail out if the underlying IFC object is gone or is not a product
	if( product_shape->m_ifc_object_definition.expired() ) { return; }
	shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
	shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>( ifc_object_def );
	if( !ifc_product ) { return; }
	const int product_id = ifc_product->m_entity_id;
	std::stringstream strs_product_switch_name;
	strs_product_switch_name << "#" <<
product_id << "=" << ifc_product->className() << " group"; bool draw_bounding_box = false; // create OSG objects std::vector<shared_ptr<RepresentationData> >& vec_product_representations = product_shape->m_vec_representations; for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation ) { const shared_ptr<RepresentationData>& product_representation_data = vec_product_representations[ii_representation]; if( product_representation_data->m_ifc_representation.expired() ) { continue; } shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation ); const int representation_id = ifc_representation->m_entity_id; osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch(); #ifdef _DEBUG std::stringstream strs_representation_name; strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation; representation_switch->setName( strs_representation_name.str().c_str() ); #endif const std::vector<shared_ptr<ItemShapeData> >& product_items = product_representation_data->m_vec_item_data; for( size_t i_item = 0; i_item < product_items.size(); ++i_item ) { const shared_ptr<ItemShapeData>& item_shape = product_items[i_item]; osg::ref_ptr<osg::MatrixTransform> item_group = new osg::MatrixTransform(); if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); } #ifdef _DEBUG std::stringstream strs_item_name; strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item; item_group->setName( strs_item_name.str().c_str() ); #endif // create shape for open shells for( size_t ii = 0; ii < item_shape->m_meshsets_open.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets_open[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode, 
m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } // disable back face culling for open meshes geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF ); item_group->addChild( geode ); if( draw_bounding_box ) { carve::geom::aabb<3> bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", open meshset " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for meshsets for( size_t ii = 0; ii < item_shape->m_meshsets.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> geode_meshset = new osg::Geode(); if( !geode_meshset ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode_meshset, m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); item_group->addChild( geode_meshset ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode_meshset, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } if( draw_bounding_box ) { carve::geom::aabb<3> bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode_meshset->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", meshset " << ii; geode_meshset->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for points const std::vector<shared_ptr<carve::input::VertexData> >& vertex_points = item_shape->getVertexPoints(); for( 
size_t ii = 0; ii < vertex_points.size(); ++ii ) { const shared_ptr<carve::input::VertexData>& pointset_data = vertex_points[ii]; if( pointset_data ) { if( pointset_data->points.size() > 0 ) { osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t i_pointset_point = 0; i_pointset_point < pointset_data->points.size(); ++i_pointset_point ) { vec3& carve_point = pointset_data->points[i_pointset_point]; vertices->push_back( osg::Vec3d( carve_point.x, carve_point.y, carve_point.z ) ); } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) ); geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON ); geode->addDrawable( geometry ); geode->setCullingActive( false ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", vertex_point " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } } } // create shape for polylines for( size_t ii = 0; ii < item_shape->m_polylines.size(); ++ii ) { shared_ptr<carve::input::PolylineSetData>& polyline_data = item_shape->m_polylines[ii]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); drawPolyline( polyline_data.get(), geode ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", polylines " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } if( m_geom_settings->isShowTextLiterals() ) { for( size_t 
ii = 0; ii < item_shape->m_vec_text_literals.size(); ++ii ) { shared_ptr<TextItemData>& text_data = item_shape->m_vec_text_literals[ii]; if( !text_data ) { continue; } carve::math::Matrix& text_pos = text_data->m_text_position; // TODO: handle rotation std::string text_str; text_str.assign( text_data->m_text.begin(), text_data->m_text.end() ); osg::Vec3 pos2( text_pos._41, text_pos._42, text_pos._43 ); osg::ref_ptr<osgText::Text> txt = new osgText::Text(); if( !txt ) { throw OutOfMemoryException( __FUNC__ ); } txt->setFont( "fonts/arial.ttf" ); txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) ); txt->setCharacterSize( 0.1f ); txt->setAutoRotateToScreen( true ); txt->setPosition( pos2 ); txt->setText( text_str.c_str() ); txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ){ throw OutOfMemoryException( __FUNC__ ); } geode->addDrawable( txt ); item_group->addChild( geode ); } } // apply statesets if there are any if( item_shape->m_vec_item_appearances.size() > 0 ) { applyAppearancesToGroup( item_shape->m_vec_item_appearances, item_group ); } // If anything has been created, add it to the representation group if( item_group->getNumChildren() > 0 ) { #ifdef _DEBUG if( item_group->getNumParents() > 0 ) { std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl; } #endif representation_switch->addChild( item_group ); } } // apply statesets if there are any if( product_representation_data->m_vec_representation_appearances.size() > 0 ) { applyAppearancesToGroup( product_representation_data->m_vec_representation_appearances, representation_switch ); } // If anything has been created, add it to the product group if( representation_switch->getNumChildren() > 0 ) { #ifdef _DEBUG if( representation_switch->getNumParents() > 0 ) { std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl; } #endif // enable transparency for certain objects if( 
dynamic_pointer_cast<IfcSpace>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); } else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); SceneGraphUtils::setMaterialAlpha( representation_switch, 0.6f, true ); } // check if parent building element is window if( ifc_product->m_Decomposes_inverse.size() > 0 ) { for( size_t ii_decomposes = 0; ii_decomposes < ifc_product->m_Decomposes_inverse.size(); ++ii_decomposes ) { const weak_ptr<IfcRelAggregates>& decomposes_weak = ifc_product->m_Decomposes_inverse[ii_decomposes]; if( decomposes_weak.expired() ) { continue; } shared_ptr<IfcRelAggregates> decomposes_ptr(decomposes_weak); shared_ptr<IfcObjectDefinition>& relating_object = decomposes_ptr->m_RelatingObject; if( relating_object ) { if( dynamic_pointer_cast<IfcCurtainWall>(relating_object) || dynamic_pointer_cast<IfcWindow>(relating_object) ) { representation_switch->setStateSet(m_glass_stateset); SceneGraphUtils::setMaterialAlpha(representation_switch, 0.6f, true); } } } } map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) ); } } // TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs } /*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData. \param[out] parent_group Group to append the geometry. **/ void convertToOSG( const std::map<int, shared_ptr<ProductShapeData> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group ) { progressTextCallback( L"Converting geometry to OpenGL format ..." 
); progressValueCallback( 0, "scenegraph" ); m_map_entity_id_to_switch.clear(); m_map_representation_id_to_switch.clear(); m_vec_existing_statesets.clear(); shared_ptr<ProductShapeData> ifc_project_data; std::vector<shared_ptr<ProductShapeData> > vec_products; for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it ) { shared_ptr<ProductShapeData> shape_data = it->second; if( shape_data ) { vec_products.push_back( shape_data ); } } // create geometry for for each IfcProduct independently, spatial structure will be resolved later std::map<int, osg::ref_ptr<osg::Switch> >* map_entity_id = &m_map_entity_id_to_switch; std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_id_to_switch; const int num_products = (int)vec_products.size(); #ifdef ENABLE_OPENMP Mutex writelock_map; Mutex writelock_message_callback; Mutex writelock_ifc_project; #pragma omp parallel firstprivate(num_products) shared(map_entity_id, map_representations) { // time for one product may vary significantly, so schedule not so many #pragma omp for schedule(dynamic,40) #endif for( int i = 0; i < num_products; ++i ) { shared_ptr<ProductShapeData>& shape_data = vec_products[i]; weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition; if( ifc_object_def_weak.expired() ) { continue; } shared_ptr<IfcObjectDefinition> ifc_object_def( ifc_object_def_weak ); std::stringstream thread_err; if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) ) { // geometry will be created in method subtractOpenings continue; } else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_ifc_project ); #endif ifc_project_data = shape_data; } shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { continue; } if( !ifc_product->m_Representation ) { continue; } const int product_id = ifc_product->m_entity_id; std::map<int, 
osg::ref_ptr<osg::Switch> > map_representation_switches; try { convertProductShapeToOSG( shape_data, map_representation_switches ); } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { thread_err << e.what(); } catch( carve::exception& e ) { thread_err << e.str(); } catch( std::exception& e ) { thread_err << e.what(); } catch( ... ) { thread_err << "undefined error, product id " << product_id; } if( map_representation_switches.size() > 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); osg::ref_ptr<osg::MatrixTransform> product_transform = new osg::MatrixTransform(); product_transform->setMatrix( convertMatrixToOSG( shape_data->getTransform() ) ); product_switch->addChild( product_transform ); std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; product_switch->setName( strs_product_switch_name.str().c_str() ); for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map ) { osg::ref_ptr<osg::Switch>& repres_switch = it_map->second; product_transform->addChild( repres_switch ); } // apply statesets if there are any const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances(); if( vec_product_appearances.size() > 0 ) { applyAppearancesToGroup( vec_product_appearances, product_switch ); } #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_map ); #endif map_entity_id->insert( std::make_pair( product_id, product_switch ) ); map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() ); } if( thread_err.tellp() > 0 ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_message_callback ); #endif messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } // progress callback double progress = (double)i / (double)num_products; if( progress - m_recent_progress > 0.02 ) { 
#ifdef ENABLE_OPENMP if( omp_get_thread_num() == 0 ) #endif { // leave 10% of progress to openscenegraph internals progressValueCallback( progress*0.9, "scenegraph" ); m_recent_progress = progress; } } } #ifdef ENABLE_OPENMP } // implicit barrier #endif try { // now resolve spatial structure if( ifc_project_data ) { resolveProjectStructure( ifc_project_data, parent_group ); } } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( ... ) { messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } progressValueCallback( 0.9, "scenegraph" ); } void addNodes( const std::map<int, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group ) { // check if there are entities that are not in spatial structure if( !target_group ) { target_group = new osg::Switch(); } for( auto it_product_shapes = map_shape_data.begin(); it_product_shapes != map_shape_data.end(); ++it_product_shapes ) { int product_id = it_product_shapes->first; auto it_find = m_map_entity_id_to_switch.find( product_id ); if( it_find != m_map_entity_id_to_switch.end() ) { osg::ref_ptr<osg::Switch>& sw = it_find->second; if( sw ) { target_group->addChild( sw ); } } } } void resolveProjectStructure( const shared_ptr<ProductShapeData>& product_data, osg::ref_ptr<osg::Switch> group ) { if( !product_data ) { return; } if( product_data->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> object_def( product_data->m_ifc_object_definition ); const int entity_id = object_def->m_entity_id; if( SceneGraphUtils::inParentList( entity_id, group ) ) { messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, object_def.get() ); return; } const std::vector<shared_ptr<ProductShapeData> >& vec_children = 
product_data->getChildren(); for( size_t ii = 0; ii < vec_children.size(); ++ii ) { const shared_ptr<ProductShapeData>& child_product_data = vec_children[ii]; if( !child_product_data ) { continue; } osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); if( !child_product_data->m_ifc_object_definition.expired() ) { shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition ); std::stringstream group_subparts_name; group_subparts_name << "#" << child_obj_def->m_entity_id << "="; group_subparts_name << child_obj_def->className(); group_subparts->setName( group_subparts_name.str().c_str() ); } group->addChild( group_subparts ); resolveProjectStructure( child_product_data, group_subparts ); } auto it_product_map = m_map_entity_id_to_switch.find( entity_id ); if( it_product_map != m_map_entity_id_to_switch.end() ) { const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second; if( product_switch ) { group->addChild( product_switch ); } } else { if( group->getNumChildren() == 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); group->addChild( product_switch ); std::stringstream switch_name; switch_name << "#" << entity_id << "=" << object_def->className(); product_switch->setName( switch_name.str().c_str() ); } } } void clearAppearanceCache() { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif m_vec_existing_statesets.clear(); } void convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence, osg::ref_ptr<osg::StateSet>& target_stateset ) { if( !appearence ) { return; } const float shininess = appearence->m_shininess; const float transparency = appearence->m_transparency; const bool set_transparent = appearence->m_set_transparent; const float color_ambient_r = appearence->m_color_ambient.r(); const float color_ambient_g = appearence->m_color_ambient.g(); const float color_ambient_b = appearence->m_color_ambient.b(); const float color_ambient_a = 
appearence->m_color_ambient.a(); const float color_diffuse_r = appearence->m_color_diffuse.r(); const float color_diffuse_g = appearence->m_color_diffuse.g(); const float color_diffuse_b = appearence->m_color_diffuse.b(); const float color_diffuse_a = appearence->m_color_diffuse.a(); const float color_specular_r = appearence->m_color_specular.r(); const float color_specular_g = appearence->m_color_specular.g(); const float color_specular_b = appearence->m_color_specular.b(); const float color_specular_a = appearence->m_color_specular.a(); if( m_enable_stateset_caching ) { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif for( size_t i = 0; i<m_vec_existing_statesets.size(); ++i ) { const osg::ref_ptr<osg::StateSet> stateset_existing = m_vec_existing_statesets[i]; if( !stateset_existing.valid() ) { continue; } osg::ref_ptr<osg::Material> mat_existing = (osg::Material*)stateset_existing->getAttribute( osg::StateAttribute::MATERIAL ); if( !mat_existing ) { continue; } // compare osg::Vec4f color_ambient_existing = mat_existing->getAmbient( osg::Material::FRONT_AND_BACK ); if( fabs( color_ambient_existing.r() - color_ambient_r ) > 0.03 ) break; if( fabs( color_ambient_existing.g() - color_ambient_g ) > 0.03 ) break; if( fabs( color_ambient_existing.b() - color_ambient_b ) > 0.03 ) break; if( fabs( color_ambient_existing.a() - color_ambient_a ) > 0.03 ) break; osg::Vec4f color_diffuse_existing = mat_existing->getDiffuse( osg::Material::FRONT_AND_BACK ); if( fabs( color_diffuse_existing.r() - color_diffuse_r ) > 0.03 ) break; if( fabs( color_diffuse_existing.g() - color_diffuse_g ) > 0.03 ) break; if( fabs( color_diffuse_existing.b() - color_diffuse_b ) > 0.03 ) break; if( fabs( color_diffuse_existing.a() - color_diffuse_a ) > 0.03 ) break; osg::Vec4f color_specular_existing = mat_existing->getSpecular( osg::Material::FRONT_AND_BACK ); if( fabs( color_specular_existing.r() - color_specular_r ) > 0.03 ) break; if( fabs( 
color_specular_existing.g() - color_specular_g ) > 0.03 ) break; if( fabs( color_specular_existing.b() - color_specular_b ) > 0.03 ) break; if( fabs( color_specular_existing.a() - color_specular_a ) > 0.03 ) break; float shininess_existing = mat_existing->getShininess( osg::Material::FRONT_AND_BACK ); if( fabs( shininess_existing - shininess ) > 0.03 ) break; bool blend_on_existing = stateset_existing->getMode( GL_BLEND ) == osg::StateAttribute::ON; if( blend_on_existing != set_transparent ) break; bool transparent_bin = stateset_existing->getRenderingHint() == osg::StateSet::TRANSPARENT_BIN; if( transparent_bin != set_transparent ) break; // if we get here, appearance is same as existing state set // TODO: block this re-used stateset for merging, or prevent merged statesets from being re-used target_stateset = stateset_existing; return; } } osg::Vec4f ambientColor( color_ambient_r, color_ambient_g, color_ambient_b, transparency ); osg::Vec4f diffuseColor( color_diffuse_r, color_diffuse_g, color_diffuse_b, transparency ); osg::Vec4f specularColor( color_specular_r, color_specular_g, color_specular_b, transparency ); // TODO: material caching and re-use osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ){ throw OutOfMemoryException(); } mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, diffuseColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, specularColor ); mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); mat->setColorMode( osg::Material::SPECULAR ); target_stateset = new osg::StateSet(); if( !target_stateset ){ throw OutOfMemoryException(); } target_stateset->setAttribute( mat, osg::StateAttribute::ON ); if( appearence->m_set_transparent ) { mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency ); target_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); target_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } if( 
appearence->m_specular_exponent != 0.f ) { //osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights(); //spec_highlights->setSpecularExponent( spec->m_value ); // todo: add to scenegraph } if( m_enable_stateset_caching ) { m_vec_existing_statesets.push_back( target_stateset ); } } };
mem-consistency-openmp3x.c
/************************************************************************

     OpenMP-3.0 Example Codes Beta-v1.0

   File            : mem-consistency-openmp3x.c

   Date            : Aug 2011

   Description     : Simple example program that demonstrates the importance
                     of maintaining memory consistency.

                     At Print 1, the value of x could be either 2 or 5,
                     depending on the timing of the threads and on the
                     implementation of the assignment to x.  There are two
                     reasons the value at Print 1 might not be 5.  First,
                     Print 1 might execute before the assignment to x.
                     Second, even if Print 1 executes after the assignment,
                     the value 5 is not guaranteed to be seen by thread 1,
                     because a flush may not have been executed by thread 0
                     since the assignment.

                     The barrier after Print 1 contains implicit flushes on
                     all threads as well as a thread synchronization, so the
                     programmer is guaranteed that the value 5 will be
                     printed by both Print 2 and Print 3.

   Input           : Number of threads (command-line argument)

   Output          : Value of the variable x

*****************************************************************************************/

/* Header file inclusion */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Main Program: demonstrate OpenMP memory consistency with a racy read
   before a barrier and guaranteed-consistent reads after it. */
int main(int argc, char **argv)
{
    int x, numThreads;

    /* Checking for command line arguments.
       FIX: the old message said "Very Few Arguments" even when too many
       were given; report the actual problem. */
    if (argc != 2) {
        printf("\t\t Invalid number of arguments\n ");
        printf("\t\t Syntax : exec <No. of Threads>\n");
        exit(-1);
    }

    /* Initializing the number of threads.
       FIX: atoi() returns 0 on non-numeric input; reject non-positive
       counts instead of passing them to omp_set_num_threads(). */
    numThreads = atoi(argv[1]);
    if (numThreads <= 0) {
        printf("\t\t <No. of Threads> must be a positive integer\n");
        exit(-1);
    }

    x = 2;

    /* Setting the number of threads */
    omp_set_num_threads(numThreads);

    /* Create the parallel region; x is deliberately shared to expose
       the consistency issue described above. */
    #pragma omp parallel shared(x)
    {
        if (omp_get_thread_num() == 0) {
            x = 5;
            //sleep(10);
        } else {
            /* Print 1: the following read of x has a race —
               it may observe either 2 or 5 */
            printf("\n\t\t 1: Thread# %d: x = %d\n", omp_get_thread_num(), x);
        }

        #pragma omp barrier /* synchronization + implicit flush on all threads */

        if (omp_get_thread_num() == 0) {
            /* Print 2: guaranteed to see x == 5 */
            printf("\n\t\t 2: Thread# %d: x = %d\n", omp_get_thread_num(), x);
        } else {
            /* Print 3: guaranteed to see x == 5 */
            printf("\n\t\t 3: Thread# %d: x = %d\n", omp_get_thread_num(), x);
        }
    }

    return 0;
}
GB_binop__first_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__first_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__first_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_uint32)
// A*D function (colscale):         GB (_AxD__first_uint32)
// D*A function (rowscale):         GB (_DxB__first_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_uint32)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   uint32_t
// A type:   uint32_t
// A pattern? 0
// B type:   uint32_t
// B pattern? 1

// BinaryOp: cij = aij      (the FIRST operator: the B values are never read)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]   (empty: B values are not used by the FIRST operator)
#define GB_GETB(bij,Bx,pB,B_iso)  \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = first(x,y) = x
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_UINT32 || GxB_NO_FIRST_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// FIRST is not one of these, so this variant is not generated.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator; the call is a no-op
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator; the call is a no-op
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion supplies scalars for entries present in only one input
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ;                      \
    ;                      \
    Cx [pC] = x ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = aij ;                             \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
summary.c
/* Test driver for GPTLpr_summary: times threaded work across MPI ranks
   and prints a per-rank plus summary timing report. */

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>       /* getopt, usleep */
#include <string.h>       /* memset */

#include "../gptl.h"

#ifdef THREADED_OMP
#include <omp.h>
#endif

static int iam = 0;       /* MPI rank of this task */
static int nproc = 1;     /* number of MPI tasks (default 1) */
static int nthreads = 1;  /* number of threads (default 1) */

double sub (int);

int main (int argc, char **argv)
{
  char pname[MPI_MAX_PROCESSOR_NAME];
  int iter;
  int counter;
  int c;
  int tnum = 0;
  int resultlen;
  int ret;
  double value;
  extern char *optarg;

  /* Parse command line: -p <name> enables the named GPTL event counter */
  while ((c = getopt (argc, argv, "p:")) != -1) {
    switch (c) {
    case 'p':
      if ((ret = GPTLevent_name_to_code (optarg, &counter)) != 0) {
	printf ("Failure from GPTLevent_name_to_code\n");
	return 1;
      }
      if (GPTLsetoption (counter, 1) < 0) {
	printf ("Failure from GPTLsetoption (%s,1)\n", optarg);
	return 1;
      }
      break;
    default:
      printf ("unknown option %c\n", c);
      printf ("Usage: %s [-p option_name]\n", argv[0]);
      return 2;
    }
  }

  ret = GPTLsetoption (GPTLabort_on_error, 1);
  ret = GPTLsetoption (GPTLoverhead, 1);
  ret = GPTLsetoption (GPTLnarrowprint, 1);

  if (MPI_Init (&argc, &argv) != MPI_SUCCESS) {
    printf ("Failure from MPI_Init\n");
    return 1;
  }

  /*
  ** If ENABLE_PMPI is set, GPTL was initialized in MPI_Init
  */
#ifndef ENABLE_PMPI
  ret = GPTLinitialize ();
  ret = GPTLstart ("total");
#endif

  ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam);
  ret = MPI_Comm_size (MPI_COMM_WORLD, &nproc);
  ret = MPI_Get_processor_name (pname, &resultlen);
  printf ("Rank %d is running on processor %s\n", iam, pname);

#ifdef THREADED_OMP
  nthreads = omp_get_max_threads ();
  /* BUG FIX: "value" was written by every thread but was not in the private
     list, which is a data race on the shared double.  It is now private. */
#pragma omp parallel for private (iter, ret, tnum, value)
#endif

  for (iter = 1; iter <= nthreads; iter++) {
#ifdef THREADED_OMP
    tnum = omp_get_thread_num ();
#endif
    printf ("Thread %d of rank %d on processor %s\n", tnum, iam, pname);
    value = sub (iter);
  }

#ifndef ENABLE_PMPI
  ret = GPTLstop ("total");
  ret = GPTLpr (iam);
#endif

  if (iam == 0) {
    printf ("summary: testing GPTLpr_summary...\n");
    printf ("Number of threads was %d\n", nthreads);
    printf ("Number of tasks was %d\n", nproc);
  }

  if (GPTLpr_summary (MPI_COMM_WORLD) != 0)
    return 1;

  if (GPTLpr_summary_file (MPI_COMM_WORLD, "timing.summary.duplicate") != 0)
    return 1;

  ret = MPI_Finalize ();

  if (GPTLfinalize () != 0)
    return 1;

  return 0;
}

/* Do some timed busywork whose length scales with the MPI rank and the
   iteration number, so each rank/thread produces distinct timings.
   Returns the accumulated sum (value is unimportant; it only prevents
   the loops from being optimized away). */
double sub (int iter)
{
  unsigned long usec;
  unsigned long looplen = iam*iter*100000;  /* rank 0 does no loop work */
  unsigned long i;
  double sum;
  int ret;

  ret = GPTLstart ("sub");

  /* Sleep for iam*iter milliseconds (product, so rank 0 does not sleep) */
  usec = 1000 * (iam * iter);

  ret = GPTLstart ("sleep");
  usleep (usec);
  ret = GPTLstop ("sleep");

  ret = GPTLstart ("work");
  sum = 0.;
  ret = GPTLstart ("add");
  for (i = 0; i < looplen; ++i) {
    sum += i;
  }
  ret = GPTLstop ("add");

  ret = GPTLstart ("madd");
  for (i = 0; i < looplen; ++i) {
    sum += i*1.1;
  }
  ret = GPTLstop ("madd");

  ret = GPTLstart ("div");
  for (i = 0; i < looplen; ++i) {
    sum /= 1.1;
  }
  ret = GPTLstop ("div");
  ret = GPTLstop ("work");
  ret = GPTLstop ("sub");
  return sum;
}
imd_integrate.c
/****************************************************************************** * * IMD -- The ITAP Molecular Dynamics Program * * Copyright 1996-2011 Institute for Theoretical and Applied Physics, * University of Stuttgart, D-70550 Stuttgart * ******************************************************************************/ /****************************************************************************** * * imd_integrate -- various md integrators * ******************************************************************************/ /****************************************************************************** * $Revision$ * $Date$ ******************************************************************************/ #include "imd.h" //MYMOD //#define ELECPRESS //ENDOF MYMOD /***************************************************************************** * * Basic NVE Integrator * *****************************************************************************/ #if defined(NVE) || defined(EPITAX) void move_atoms_nve(void) { int k; static int count = 0; real tmp_f_max2=0.0; real tmp_x_max2=0.0; //MYMOD : Moechte auch pdecay ohne ttm nutzen! #ifdef PDECAY double a= 1.0/(ramp_end - ramp_start); //HOTFIX fuer +/- y bnd double ay0 =1.0/(ramp_y0max-ramp_y0min); double ay1 =1.0/(ramp_y1max-ramp_y1min); ay0*=ay0; ay1*=ay1; a*=a; #endif //ENDOF MYMOD #ifdef DAMP real tmp1, tmp2, tmp3, f, maxax, maxax2; #endif #ifdef BER real cc; #endif #ifdef BER /* reproduce Ju Li's implementation of the Berendsen Thermostat */ cc = 1. - timestep/ tauber * ( ( 2.0 * tot_kin_energy / nactive+8.6174101569719990e-06) / (temperature+8.6174101569719990e-06) - 1. ); /* this would be the standard formula */ //cc = 1.+timestep/tauber*((temperature+8.6174101569719990e-06)/(2.0*tot_kin_energy/nactive+8.6174101569719990e-06)- 1. 
); if (cc < 0.5) cc = 0.5; else if (cc > 2.0) cc = 2.0; cc=sqrt(cc); #endif /* epitax may call this routine for other ensembles, in which case we do not reset tot_kin_energy */ if ((ensemble==ENS_NVE) || (ensemble==ENS_GLOK)) tot_kin_energy = 0.0; fnorm = 0.0; xnorm = 0.0; pnorm = 0.0; PxF = 0.0; omega_E = 0.0; #ifdef DAMP n_damp = 0; tot_kin_energy_damp = 0.0; #endif #ifdef SHOCK if (do_press_calc) calc_pxavg(); #endif /* loop over all cells */ #ifdef _OPENMP #pragma omp parallel for reduction(+:tot_kin_energy,fnorm,omega_E,PxF,pnorm) #endif for (k=0; k<NCELLS; ++k) { /* loop over all cells */ int i,j, sort; cell *p; real kin_energie_1, kin_energie_2, tmp; #ifdef UNIAX real rot_energie_1, rot_energie_2; real dot, norm; vektor cross; #endif #ifdef RIGID int satom; real relmass; #endif #ifdef DAMP real kin_energie_damp_1,kin_energie_damp_2,tmp2,rampedtemp,zeta_finnis; #endif p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif IMPULS(p,i+j,X) = IMPULS(p,i,X); IMPULS(p,i+j,Y) = IMPULS(p,i,Y); #ifndef TWOD IMPULS(p,i+j,Z) = IMPULS(p,i,Z); #endif } #endif /* CLONE */ #ifdef SX #pragma vdir vector,nodep #endif for (i=0; i<p->n; ++i) { /* loop over all atoms in the cell */ #ifdef EPITAX /* beam atoms are always integrated by NVE */ if ( (ensemble != ENS_NVE) && (NUMMER(p,i) <= epitax_sub_n) && (POTENG(p,i) <= epitax_ctrl * epitax_poteng_min) ) continue; #endif #ifndef DAMP kin_energie_1 = SPRODN(IMPULS,p,i,IMPULS,p,i); #endif #ifdef UNIAX rot_energie_1 = SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i); #endif sort = VSORTE(p,i); #ifdef RIGID if ( superatom[sort] > -1 ) { satom = superatom[sort]; relmass = MASSE(p,i) / supermass[satom]; if ( (superrestrictions + satom)->x ) KRAFT(p,i,X) = superforce[satom].x * relmass; if ( (superrestrictions + satom)->y ) KRAFT(p,i,Y) = superforce[satom].y * relmass; #ifndef TWOD if ( 
(superrestrictions + satom)->z ) KRAFT(p,i,Z) = superforce[satom].z * relmass; #endif } #endif #if defined(FBC) && !defined(RIGID) /* give virtual particles their extra force */ KRAFT(p,i,X) += (fbc_forces + sort)->x; KRAFT(p,i,Y) += (fbc_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (fbc_forces + sort)->z; #endif #endif #if defined(FBC) && defined(BEND) /* give virtual particles their extra force */ KRAFT(p,i,X) += (bend_forces + sort)->x; KRAFT(p,i,Y) += (bend_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (bend_forces + sort)->z; #endif #endif #ifdef LANGEVIN if (VISCOUS_FRICTION(p,i)>1e-12){ //Langevin Thermostat, also uses the viscous daming part real sigma = sqrt(24. * temperature * (VISCOUS_FRICTION(p,i)/timestep)/timestep * MASSE(p,i)); KRAFT(p,i,X) += ((drand48()-0.5)*sigma); KRAFT(p,i,Y) += ((drand48()-0.5)*sigma); KRAFT(p,i,Z) += ((drand48()-0.5)*sigma); } #endif #ifdef VISCOUS if (VISCOUS_FRICTION(p,i)>1e-12){ //Viscous damping real sfric = VISCOUS_FRICTION(p,i)/timestep; KRAFT(p,i,X) -= IMPULS(p,i,X)*sfric; KRAFT(p,i,Y) -= IMPULS(p,i,Y)*sfric; KRAFT(p,i,Z) -= IMPULS(p,i,Z)*sfric; } #endif /* and set their force (->momentum) in restricted directions to 0 */ KRAFT(p,i,X) *= (restrictions + sort)->x; KRAFT(p,i,Y) *= (restrictions + sort)->y; #ifndef TWOD KRAFT(p,i,Z) *= (restrictions + sort)->z; #endif #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); #ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif #ifdef EINSTEIN omega_E += SPRODN(KRAFT,p,i,KRAFT,p,i) / MASSE(p,i); #endif // **************************************************************************** #ifndef DAMP /* Normal NVE */ //MYMOD : Auch ohne ttm moechte ich pdecay nutzen koennen! 
#ifdef PDECAY if( ORT(p,i,X) > ramp_start ) #ifdef NRB if(NRBBND(p,i)==0) #endif KRAFT(p,i,X) -= ( IMPULS(p,i,X)/MASSE(p,i)) * xipdecay * a * ( ORT(p,i,X) - ramp_start ) * ( ORT(p,i,X) - ramp_start ); #endif //HOTIFX fuer +/- y bnd: wude bei 2d-sims. benutzt..brauche ich nun nicht mehr. //ACHTUNG: Ablatiertes Material nicht kuehlen! // if(ORT(p,i,X)>=pdecay_surfx) // { // if( ORT(p,i,Y) > ramp_y1min ) // KRAFT(p,i,Y) -= ( IMPULS(p,i,Y)/MASSE(p,i)) * xipdecay * ay1 * ( ORT(p,i,Y) - ramp_y1min ) * ( ORT(p,i,Y) - ramp_y1min ); // else if(ORT(p,i,Y)< ramp_y0max) // KRAFT(p,i,Y) -= ( IMPULS(p,i,Y)/MASSE(p,i)) * xipdecay * ay0 * ( ramp_y0max -ORT(p,i,Y) ) * ( ramp_y0max-ORT(p,i,Y) ); // } // #endif //ENDOF MYMOD //MYMOD #ifdef NRB if(NRBBND(p,i)==0) //d.h. in diesem Fall nur für nicht-bnd-atome das standart-schema.Für bnd-atome wird der impuls anders berechnet { IMPULS(p,i,X) += timestep * KRAFT(p,i,X); IMPULS(p,i,Y) += timestep * KRAFT(p,i,Y); IMPULS(p,i,Z) += timestep * KRAFT(p,i,Z); } #else //Standart IMPULS(p,i,X) += timestep * KRAFT(p,i,X); IMPULS(p,i,Y) += timestep * KRAFT(p,i,Y); IMPULS(p,i,Z) += timestep * KRAFT(p,i,Z); #endif //ENDOF MYMOD //standart-schema /* IMPULS(p,i,X) += timestep * KRAFT(p,i,X); IMPULS(p,i,Y) += timestep * KRAFT(p,i,Y); IMPULS(p,i,Z) += timestep * KRAFT(p,i,Z); */ // ****************************************************************************** #else /* Damping layers */ /* use a local thermostat: Finnis We fix a temperature gradient from temp to zero in the same way the damping constant is ramped up. The mean temperature in the damping layers has no meaning, only temperature of the inner part is output */ /* the stadium function for each atom could also be calculated in forces_nbl to save time */ /* it is the users responsability that stadium.i / stadium2.i is equal for all i */ maxax = MAX(MAX(stadium.x,stadium.y),stadium.z); maxax2 = MAX(MAX(stadium2.x,stadium2.y),stadium2.z); /* Calculate stadium function f */ tmp1 = (stadium2.x==0) ? 
0 : SQR((ORT(p,i,X)-center.x)/(2.0*stadium2.x)); tmp2 = (stadium2.y==0) ? 0 : SQR((ORT(p,i,Y)-center.y)/(2.0*stadium2.y)); tmp3 = (stadium2.z==0) ? 0 : SQR((ORT(p,i,Z)-center.z)/(2.0*stadium2.z)); f = (tmp1+tmp2+tmp3-SQR(maxax / (2.0*maxax2)) ) / (0.25 - SQR(maxax / (2.0*maxax2)) ); if (f<= 0.0) f = 0.0; else if (f>1.0) f = 1.0; /* we smooth the stadium function: to get a real bath tub !*/ DAMPF(p,i) = .5 * (1 + sin(-M_PI/2.0 + M_PI*f)); if (DAMPF(p,i) == 0.0) { /* take care of possible rounding errors ? */ kin_energie_1 = SPRODN(IMPULS,p,i,IMPULS,p,i); IMPULS(p,i,X) += timestep * KRAFT(p,i,X); IMPULS(p,i,Y) += timestep * KRAFT(p,i,Y); #ifndef TWOD IMPULS(p,i,Z) += timestep * KRAFT(p,i,Z); #endif kin_energie_2 = SPRODN(IMPULS,p,i,IMPULS,p,i); tot_kin_energy += (kin_energie_1 + kin_energie_2) / (4 * MASSE(p,i)); } else { kin_energie_damp_1 = SPRODN(IMPULS,p,i,IMPULS,p,i); tmp = kin_energie_damp_1 / MASSE(p,i); /* local temp */ tmp2 = (restrictions + sort)->x + (restrictions + sort)->y; #ifndef TWOD tmp2 += (restrictions + sort)->z; #endif n_damp += tmp2; if (tmp2 != 0) tmp /= tmp2; /* to account for restricted mobilities */ rampedtemp = (tmp2 !=0) ? 
(tmp2/3.0 * damptemp * (1.0 - DAMPF(p,i))) : 0.0; if(rampedtemp !=0.0) { zeta_finnis = zeta_0 * (tmp-rampedtemp) / sqrt(SQR(tmp) + SQR(rampedtemp*delta_finnis)) * DAMPF(p,i); } else zeta_finnis = zeta_0; /* new momenta */ IMPULS(p,i,X) += (-1.0*IMPULS(p,i,X) * zeta_finnis + KRAFT(p,i,X)) * timestep * (restrictions + sort)->x ; IMPULS(p,i,Y) += (-1.0*IMPULS(p,i,Y) * zeta_finnis + KRAFT(p,i,Y)) * timestep * (restrictions + sort)->y; #ifndef TWOD IMPULS(p,i,Z) += (-1.0*IMPULS(p,i,Z) * zeta_finnis + KRAFT(p,i,Z)) * timestep * (restrictions + sort)->z; #endif kin_energie_damp_2 = SPRODN(IMPULS,p,i,IMPULS,p,i); tot_kin_energy_damp += (kin_energie_damp_1 + kin_energie_damp_2) / (4.0 * MASSE(p,i)) ; } #endif /* DAMP */ #if defined (GLOK)|| defined(MIX) /* "Global Convergence": */ /* like mik, just with the global force and momentum vectors */ /* change to velocity norm, change names later... */ PxF += SPRODN(IMPULS,p,i,KRAFT,p,i)/MASSE(p,i); pnorm += SPRODN(IMPULS,p,i,IMPULS,p,i)/MASSE(p,i)/MASSE(p,i); #ifdef MIX /* global version of MIX with adaptive mixing */ /* 'turn' the velocities a little bit more along the forces... 
*/ IMPULS(p,i,X) = (1.0-mix)*IMPULS(p,i,X) + mix * KRAFT(p,i,X) * mixforcescalefac * MASSE(p,i); IMPULS(p,i,Y) = (1.0-mix)*IMPULS(p,i,Y) + mix * KRAFT(p,i,Y) * mixforcescalefac * MASSE(p,i); IMPULS(p,i,Z) = (1.0-mix)*IMPULS(p,i,Z) + mix * KRAFT(p,i,Z) * mixforcescalefac * MASSE(p,i); #endif #endif /* GLOK || MIX */ #ifdef UNIAX dot = 2.0 * SPRODN(DREH_IMPULS,p,i,ACHSE,p,i); DREH_IMPULS(p,i,X) += timestep * DREH_MOMENT(p,i,X) - dot * ACHSE(p,i,X); DREH_IMPULS(p,i,Y) += timestep * DREH_MOMENT(p,i,Y) - dot * ACHSE(p,i,Y); DREH_IMPULS(p,i,Z) += timestep * DREH_MOMENT(p,i,Z) - dot * ACHSE(p,i,Z); #endif #ifndef DAMP kin_energie_2 = SPRODN(IMPULS,p,i,IMPULS,p,i); #endif #ifdef UNIAX rot_energie_2 = SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i); #endif #ifndef DAMP tot_kin_energy += (kin_energie_1 + kin_energie_2) / (4 * MASSE(p,i)); #endif #ifdef UNIAX tot_kin_energy += (rot_energie_1 + rot_energie_2) / (4 * uniax_inert); #endif #ifdef BER // if(steps%16==0) { IMPULS(p,i,X) *= cc; IMPULS(p,i,Y) *= cc; #ifndef TWOD IMPULS(p,i,Z) *= cc; #endif } #endif // **********************************************************************+ /* new positions */ tmp = timestep / MASSE(p,i); ORT(p,i,X) += tmp * IMPULS(p,i,X); ORT(p,i,Y) += tmp * IMPULS(p,i,Y); #ifndef TWOD ORT(p,i,Z) += tmp * IMPULS(p,i,Z); #endif // *********************************************************************** #ifdef RELAXINFO xnorm += tmp * tmp * SPRODN(IMPULS,p,i,IMPULS,p,i); /* determine the biggest force component */ tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,X)),tmp_x_max2); tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,Y)),tmp_x_max2); #ifndef TWOD tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,Z)),tmp_x_max2); #endif #endif #ifdef SHOCK if (shock_mode == 3 && steps > 0 ) { if (ORT(p,i,X) > box_x.x) { IMPULS(p,i,X) = -IMPULS(p,i,X); ORT(p,i,X) = 2 * box_x.x - ORT(p,i,X); } } if (shock_mode == 4) { real rand = shock_speed_l * timestep * steps ; if (ORT(p,i,X) < rand ) { IMPULS(p,i,X) = -IMPULS(p,i,X) + 2 * shock_speed_l * MASSE(p,i); 
ORT(p,i,X) = 2 * rand - ORT(p,i,X); } if (ORT(p,i,X) > box_x.x - rand ) { IMPULS(p,i,X) = -IMPULS(p,i,X) - 2 * shock_speed_r * MASSE(p,i); ORT(p,i,X) = 2 * ( box_x.x - rand ) - ORT(p,i,X); } } #endif #ifdef UNIAX /* new molecular axes */ cross.x = DREH_IMPULS(p,i,Y) * ACHSE(p,i,Z) - DREH_IMPULS(p,i,Z) * ACHSE(p,i,Y); cross.y = DREH_IMPULS(p,i,Z) * ACHSE(p,i,X) - DREH_IMPULS(p,i,X) * ACHSE(p,i,Z); cross.z = DREH_IMPULS(p,i,X) * ACHSE(p,i,Y) - DREH_IMPULS(p,i,Y) * ACHSE(p,i,X); ACHSE(p,i,X) += timestep * cross.x / uniax_inert; ACHSE(p,i,Y) += timestep * cross.y / uniax_inert; ACHSE(p,i,Z) += timestep * cross.z / uniax_inert; norm = SQRT( SPRODN(ACHSE,p,i,ACHSE,p,i) ); ACHSE(p,i,X) /= norm; ACHSE(p,i,Y) /= norm; ACHSE(p,i,Z) /= norm; #endif #ifdef STRESS_TENS if (do_press_calc) { #ifdef SHOCK PRESSTENS(p,i,xx) += (IMPULS(p,i,X) - PXAVG(p,i)) * (IMPULS(p,i,X) - PXAVG(p,i)) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += (IMPULS(p,i,X) - PXAVG(p,i)) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,xy) += (IMPULS(p,i,X) - PXAVG(p,i)) * IMPULS(p,i,Y) / MASSE(p,i); #else /* not SHOCK */ PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); #endif /* SHOCK */ } #endif /* STRESS_TENS */ } } #ifdef MPI { /* add up results from different CPUs */ int nc = 0; double tmpvec1[8], tmpvec2[8]; tmpvec1[nc++] = tot_kin_energy; tmpvec1[nc++] = fnorm; tmpvec1[nc++] = PxF; tmpvec1[nc++] = omega_E; tmpvec1[nc++] = pnorm; tmpvec1[nc++] = xnorm; #ifdef DAMP 
tmpvec1[nc++] = tot_kin_energy_damp; tmpvec1[nc++] = n_damp; #endif MPI_Allreduce( tmpvec1, tmpvec2, nc, REAL, MPI_SUM, cpugrid); nc = 0; tot_kin_energy = tmpvec2[nc++]; fnorm = tmpvec2[nc++]; PxF = tmpvec2[nc++]; omega_E = tmpvec2[nc++]; pnorm = tmpvec2[nc++]; xnorm = tmpvec2[nc++]; #ifdef DAMP tot_kin_energy_damp = tmpvec2[nc++]; n_damp = tmpvec2[nc++]; #endif } #ifdef FNORM MPI_Allreduce( &tmp_f_max2, &f_max2, 1, REAL, MPI_MAX, cpugrid); #endif #ifdef RELAXINFO MPI_Allreduce( &tmp_x_max2, &x_max2, 1, REAL, MPI_MAX, cpugrid); #endif #else /* not MPI */ #ifdef FNORM f_max2 = tmp_f_max2; #endif #ifdef RELAXINFO x_max2 = tmp_x_max2; #endif #endif /* MPI */ #if defined (GLOK) || defined (MIX) PxF /= (SQRT(fnorm) * SQRT(pnorm)); #endif #ifdef AND /* Andersen Thermostat -- Initialize the velocities now and then */ ++count; if ((tempintv!=0) && (0==count%tempintv)) maxwell(temperature); #endif } #else void move_atoms_nve(void) { if (myid==0) error("the chosen ensemble NVE is not supported by this binary"); } #endif /***************************************************************************** * * Basic NVE Integrator with TTM (two temperature model), stripped of most other options * *****************************************************************************/ #if defined(TTM) void move_atoms_ttm(void) { int k; /* static int count = 0;*/ real tmpvec1[8], tmpvec2[8]; tot_kin_energy = 0.0; #ifdef DEBUG E_ph_auf_local = 0.0; #endif /*DEBUG*/ //MYMOD #ifdef ELECPRESS double epressforce=0.0; #endif #ifdef PDECAY double a= 1.0/(ramp_end - ramp_start); a*=a; //HOTFIX fuer +/- y bnd #ifdef FDTD2D double ay0 =1.0/(ramp_y0max-ramp_y0min); double ay1 =1.0/(ramp_y1max-ramp_y1min); ay0*=ay0; ay1*=ay1; #endif #endif omega_E = 0.0; /* loop over all cells */ #ifdef _OPENMP #pragma omp parallel for reduction(+:tot_kin_energy,omega_E) #endif for (k=0; k<NCELLS; ++k) { /* loop over all cells */ int i,j, sort; int fd_i, fd_j, fd_k; double fd_xi; cell *p; real kin_energie_1, kin_energie_2, 
tmp; p = CELLPTR(k); #ifndef TTM1D fd_i=p->fd_cell_idx.x; fd_j=p->fd_cell_idx.y; fd_k=p->fd_cell_idx.z; /* get coupling constant, if fd cell is inactive, no coupling */ fd_xi=(l1[fd_i][fd_j][fd_k].natoms>=fd_min_atoms)?(l1[fd_i][fd_j][fd_k].xi):(0.0); #else //fd_xi=(l1[fd_i].natoms >= fd_min_atoms) ? (l1[fd_i].xi):(0.0); //Das funktioniert in diesem Fall nicht: //z.B. kümmert sich proc 0 immer um die TTM-Zellen ganz links auch wenn dort keine atome sind //gleichzeitig kann sich proc0 aber auch um außerhalb seines TTM-Bereichs kümmern... //--> Brauche globales xi-array //--> das geschieht in der schleife über atome weiter unten //Dasselbe gilt für vcom.x,y,z #endif // MY MOD : DEBUG // printf("proc:%d,steps:%d,i:%d,j:%d,k:%d,fd_xi:%e\n",myid,steps,fd_i,fd_j,fd_k,fd_xi); for (i=0; i<p->n; ++i) { /* loop over all atoms in the cell */ #ifdef TTM1D int i_global=0; if(SORTE(p,i)==0) { i_global=(int) (ORT(p,i,X)/fd_h.x); i_global=MIN(i_global,global_fd_dim.x-1); i_global=MAX(i_global,0); fd_xi=xiarr_global[i_global]; #ifdef ELECPRESS epressforce=epress_deriv[i_global]; #endif // if(NUMMER(p,i)==605) // printf("kraft:%.4e, eforce:%.4e\n",KRAFT(p,i,X),epressforce); } else { fd_xi=0.0; } #ifdef VLATTICE if(i_global >= last_active_cell_global) fx_xi=0.0; #ifdef ELECPRESS epressforce=0.0; #endif #endif #endif #ifdef DEBUG double delta_E_atom; #endif kin_energie_1 = SPRODN(IMPULS,p,i,IMPULS,p,i); sort = VSORTE(p,i); #if defined(FBC) /* give virtual particles their extra force */ KRAFT(p,i,X) += (fbc_forces + sort)->x; KRAFT(p,i,Y) += (fbc_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (fbc_forces + sort)->z; #endif #endif /* and set their force (->momentum) in restricted directions to 0 */ KRAFT(p,i,X) *= (restrictions + sort)->x; KRAFT(p,i,Y) *= (restrictions + sort)->y; #ifndef TWOD KRAFT(p,i,Z) *= (restrictions + sort)->z; #endif #ifdef EINSTEIN omega_E += SPRODN(KRAFT,p,i,KRAFT,p,i) / MASSE(p,i); #endif /* TTM: p += (F + xi*m*v_therm) * dt * 
********************************** */ //MYMOD: Pdecay (mode=3) an dieser stelle um zusaetzliche loop zu vermeiden #ifdef PDECAY if( ORT(p,i,X) > ramp_start ) { #ifdef NRB if(NRBBND(p,i)==0) #endif KRAFT(p,i,X) -= ( IMPULS(p,i,X)/MASSE(p,i)) * xipdecay * a * ( ORT(p,i,X) - ramp_start ) * ( ORT(p,i,X) - ramp_start ); } #ifdef FDTD2D //HOTIFX fuer +/- y bnd //ACHTUNG: Ablatiertes Material nicht kuehlen! if(ORT(p,i,X)>=pdecay_surfx) { if( ORT(p,i,Y) > ramp_y1min ) KRAFT(p,i,Y) -= ( IMPULS(p,i,Y)/MASSE(p,i)) * xipdecay * ay1 * ( ORT(p,i,Y) - ramp_y1min ) * ( ORT(p,i,Y) - ramp_y1min ); else if(ORT(p,i,Y)< ramp_y0max) KRAFT(p,i,Y) -= ( IMPULS(p,i,Y)/MASSE(p,i)) * xipdecay * ay0 * ( ramp_y0max -ORT(p,i,Y) ) * ( ramp_y0max-ORT(p,i,Y) ); } #endif #endif #ifdef NRB if(NRBBND(p,i) == 0) //Nur nicht-bnd atome werden aufgeheizt! { #endif #ifdef TTM1D IMPULS(p,i,X) += timestep * ( KRAFT(p,i,X) + fd_xi * MASSE(p,i) * ( IMPULS(p,i,X)/MASSE(p,i) - vcomxglobal[i_global]) ); IMPULS(p,i,Y) += timestep * ( KRAFT(p,i,Y) + fd_xi * MASSE(p,i) * ( IMPULS(p,i,Y)/MASSE(p,i) - vcomyglobal[i_global]) ); IMPULS(p,i,Z) += timestep * ( KRAFT(p,i,Z) + fd_xi * MASSE(p,i) * ( IMPULS(p,i,Z)/MASSE(p,i) - vcomzglobal[i_global]) ); #ifdef ELECPRESS IMPULS(p,i,X) -= timestep *epressforce; #endif #else IMPULS(p,i,X) += timestep * ( KRAFT(p,i,X) + fd_xi * MASSE(p,i) * ( IMPULS(p,i,X)/MASSE(p,i) - l1[fd_i][fd_j][fd_k].vcomx) ); IMPULS(p,i,Y) += timestep * ( KRAFT(p,i,Y) + fd_xi * MASSE(p,i) * ( IMPULS(p,i,Y)/MASSE(p,i) - l1[fd_i][fd_j][fd_k].vcomy) ); IMPULS(p,i,Z) += timestep * ( KRAFT(p,i,Z) + fd_xi * MASSE(p,i) * ( IMPULS(p,i,Z)/MASSE(p,i) - l1[fd_i][fd_j][fd_k].vcomz) ); #endif #ifdef NRB } #endif /* MY MOD DEBUG */ //IMPULS(p,i,X)=0; //IMPULS(p,i,Y)=0; //IMPULS(p,i,Z)=0; kin_energie_2 = SPRODN(IMPULS,p,i,IMPULS,p,i); tot_kin_energy += (kin_energie_1 + kin_energie_2) / (4 * MASSE(p,i)); /* new positions */ tmp = timestep / MASSE(p,i); /** MY MOD: DEBUG */ 
//printf("proc:%d,steps:%d,fx:%e,fy:%e,fz:%e\n",myid,steps,KRAFT(p,i,X),KRAFT(p,i,Y),KRAFT(p,i,Z)); ORT(p,i,X) += tmp * IMPULS(p,i,X); ORT(p,i,Y) += tmp * IMPULS(p,i,Y); ORT(p,i,Z) += tmp * IMPULS(p,i,Z); #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); } #endif /* STRESS_TENS */ } } #ifdef MPI /* add up results from different CPUs */ tmpvec1[0] = tot_kin_energy; tmpvec1[3] = omega_E; #ifdef DEBUG tmpvec1[7] = E_ph_auf_local; #endif /*DEBUG*/ /* MPI_Allreduce( tmpvec1, tmpvec2, 5, REAL, MPI_SUM, cpugrid); */ MPI_Allreduce( tmpvec1, tmpvec2, 8, REAL, MPI_SUM, cpugrid); tot_kin_energy = tmpvec2[0]; omega_E = tmpvec2[3]; #ifdef DEBUG E_ph_auf += tmpvec2[7]; #endif /*DEBUG*/ #endif /* MPI */ } #else void move_atoms_ttm(void) { if (myid==0) error("the chosen ensemble TTM is not supported by this binary"); } #endif /***************************************************************************** * * NVE Integrator with microconvergence relaxation * *****************************************************************************/ #ifdef MIK void move_atoms_mik(void) { int k; real tmpvec1[3], tmpvec2[3]; real tmp_f_max2=0.0; real tmp_x_max2=0.0; real mass = 0.006084; static int count = 0; /* implementation of adaptive mik time step */ tot_kin_energy = 0.0; fnorm = 0.0; xnorm = 0.0; // pnorm = 0.0; #ifdef AND /* Andersen Thermostat -- Initialize the velocities now and then */ ++count; if ((tempintv!=0) && (0==count%tempintv)) maxwell(temperature); #endif /* loop over all cells */ #ifdef _OPENMP #pragma omp parallel for reduction(+:tot_kin_energy,fnorm) #endif for (k=0; k<NCELLS; ++k) { 
int i, j, sort; cell *p; real kin_energie_1, kin_energie_2, tmp; #ifdef RIGID int satom; real relmass; #endif p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif IMPULS(p,i+j,X) = IMPULS(p,i,X); IMPULS(p,i+j,Y) = IMPULS(p,i,Y); #ifndef TWOD IMPULS(p,i+j,Z) = IMPULS(p,i,Z); #endif } #endif /* CLONE */ #ifdef SX #pragma vdir vector,nodep #endif for (i=0; i<p->n; ++i) { #ifdef EPITAX /* only substrate atoms are integrated by MIK */ if ( (NUMMER(p,i) > epitax_sub_n) && (POTENG(p,i) > epitax_ctrl * epitax_poteng_min) ) continue; #endif kin_energie_1 = SPRODN(IMPULS,p,i,IMPULS,p,i); sort = VSORTE(p,i); #ifdef RIGID if ( superatom[sort] > -1 ) { satom = superatom[sort]; relmass = MASSE(p,i) / supermass[satom]; if ( (superrestrictions + satom)->x ) KRAFT(p,i,X) = superforce[satom].x * relmass; if ( (superrestrictions + satom)->y ) KRAFT(p,i,Y) = superforce[satom].y * relmass; #ifndef TWOD if ( (superrestrictions + satom)->z ) KRAFT(p,i,Z) = superforce[satom].z * relmass; #endif } #endif #if defined(FBC) && !defined(RIGID) /* give virtual particles their extra force */ KRAFT(p,i,X) += (fbc_forces + sort)->x; KRAFT(p,i,Y) += (fbc_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (fbc_forces + sort)->z; #endif #endif /* FBC */ /* and set their force (->momentum) in restricted directions to 0 */ KRAFT(p,i,X) *= (restrictions + sort)->x; KRAFT(p,i,Y) *= (restrictions + sort)->y; #ifndef TWOD KRAFT(p,i,Z) *= (restrictions + sort)->z; #endif #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); #ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif IMPULS(p,i,X) += timestep * KRAFT(p,i,X); IMPULS(p,i,Y) += timestep * KRAFT(p,i,Y); #ifndef TWOD IMPULS(p,i,Z) += timestep * 
KRAFT(p,i,Z); #endif /* Mikroconvergence Algorithm - set velocity zero if a*v < 0 */ if (0.0 > SPRODN(IMPULS,p,i,KRAFT,p,i) ) { IMPULS(p,i,X) = 0.0; IMPULS(p,i,Y) = 0.0; #ifndef TWOD IMPULS(p,i,Z) = 0.0; #endif } else { /* new positions */ tmp = timestep / MASSE(p,i); ORT(p,i,X) += tmp * IMPULS(p,i,X); ORT(p,i,Y) += tmp * IMPULS(p,i,Y); #ifndef TWOD ORT(p,i,Z) += tmp * IMPULS(p,i,Z); #endif } #ifdef RELAXINFO // pnorm += SPRODN(IMPULS,p,i,IMPULS,p,i)/MASSE(p,i)/MASSE(p,i); xnorm += tmp * tmp* SPRODN(IMPULS,p,i,IMPULS,p,i); /* determine the biggest force component */ tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,X)),tmp_x_max2); tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,Y)),tmp_x_max2); #ifndef TWOD tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,Z)),tmp_x_max2); #endif #endif kin_energie_2 = SPRODN(IMPULS,p,i,IMPULS,p,i); /* sum up kinetic energy on this CPU */ tot_kin_energy += (kin_energie_1 + kin_energie_2) / (4.0 * MASSE(p,i)); #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); } #endif } } #ifdef MPI /* add up results from different CPUs */ tmpvec1[0] = tot_kin_energy; tmpvec1[1] = fnorm; tmpvec1[2] = xnorm; MPI_Allreduce( tmpvec1, tmpvec2, 3, REAL, MPI_SUM, cpugrid); tot_kin_energy = tmpvec2[0]; fnorm = tmpvec2[1]; xnorm = tmpvec2[2]; #ifdef FNORM MPI_Allreduce( &tmp_f_max2, &f_max2, 1, REAL, MPI_MAX, cpugrid); #endif #ifdef RELAXINFO MPI_Allreduce( &tmp_x_max2, &x_max2, 1, REAL, MPI_MAX, cpugrid); #endif #else #ifdef FNORM f_max2 = tmp_f_max2; #endif #ifdef RELAXINFO x_max2 = tmp_x_max2; #endif #endif } #else void move_atoms_mik(void) { if (myid==0) error("the chosen ensemble MIK is not 
supported by this binary"); } #endif /***************************************************************************** * * NVT Integrator with Nose Hoover Thermostat * *****************************************************************************/ #ifdef NVT void move_atoms_nvt(void) { int k; real tmpvec1[6], tmpvec2[6], ttt; real E_kin_1 = 0.0, E_kin_2 = 0.0; real reibung, eins_d_reib; real E_rot_1 = 0.0, E_rot_2 = 0.0; real tmp_f_max2=0.0; real tmp_x_max2=0.0; #ifdef UNIAX real reibung_rot, eins_d_reib_rot; #endif fnorm = 0.0; omega_E = 0.0; reibung = 1.0 - eta * timestep / 2.0; eins_d_reib = 1.0 / (1.0 + eta * timestep / 2.0); #ifdef UNIAX reibung_rot = 1.0 - eta_rot * timestep / 2.0; eins_d_reib_rot = 1.0 / (1.0 + eta_rot * timestep / 2.0); #endif #ifdef _OPENMP #pragma omp parallel for reduction(+:E_kin_1,E_kin_2,E_rot_1,E_rot_2,fnorm,omega_E) #endif for (k=0; k<NCELLS; ++k) { /* loop over cells */ int i, j, sort; cell *p; real tmp; #ifdef UNIAX real dot, norm ; vektor cross ; #endif #ifdef RIGID int satom; real relmass; #endif p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif IMPULS(p,i+j,X) = IMPULS(p,i,X); IMPULS(p,i+j,Y) = IMPULS(p,i,Y); #ifndef TWOD IMPULS(p,i+j,Z) = IMPULS(p,i,Z); #endif } #endif /* CLONE */ for (i=0; i<p->n; ++i) { /* loop over atoms */ #ifdef EPITAX /* only substrate atoms are integrated by NVT */ if ( (NUMMER(p,i) > epitax_sub_n) && (POTENG(p,i) > epitax_ctrl * epitax_poteng_min) ) continue; #endif /* twice the old kinetic energy */ E_kin_1 += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef UNIAX E_rot_1 += SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i) / uniax_inert; #endif sort = VSORTE(p,i); #ifdef RIGID if ( superatom[sort] > -1 ) { satom = superatom[sort]; relmass = MASSE(p,i) / supermass[satom]; if ( (superrestrictions + satom)->x ) KRAFT(p,i,X) = superforce[satom].x * relmass; if ( 
             (superrestrictions + satom)->y )
          KRAFT(p,i,Y) = superforce[satom].y * relmass;
#ifndef TWOD
        if ( (superrestrictions + satom)->z )
          KRAFT(p,i,Z) = superforce[satom].z * relmass;
#endif
      }
#endif

#if defined(FBC) && !defined(RIGID)
      /* give virtual particles their extra force */
      KRAFT(p,i,X) += (fbc_forces + sort)->x;
      KRAFT(p,i,Y) += (fbc_forces + sort)->y;
#ifndef TWOD
      KRAFT(p,i,Z) += (fbc_forces + sort)->z;
#endif
#endif

#if defined(FBC) && defined(BEND)
      /* give virtual particles their extra force */
      KRAFT(p,i,X) += (bend_forces + sort)->x;
      KRAFT(p,i,Y) += (bend_forces + sort)->y;
#ifndef TWOD
      KRAFT(p,i,Z) += (bend_forces + sort)->z;
#endif
#endif

      /* zero the force in restricted directions */
      KRAFT(p,i,X) *= (restrictions + sort)->x;
      KRAFT(p,i,Y) *= (restrictions + sort)->y;
#ifndef TWOD
      KRAFT(p,i,Z) *= (restrictions + sort)->z;
#endif

#ifdef FNORM
      fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i);
      /* determine the biggest force component */
      tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2);
      tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2);
#ifndef TWOD
      tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2);
#endif
#endif

#ifdef EINSTEIN
      omega_E += SPRODN(KRAFT,p,i,KRAFT,p,i) / MASSE(p,i);
#endif

      /* new momenta: Nose-Hoover half-step friction applied around the
         force kick, then restricted directions zeroed again */
      IMPULS(p,i,X) = (IMPULS(p,i,X) * reibung + timestep * KRAFT(p,i,X)) * eins_d_reib * (restrictions + sort)->x;
      IMPULS(p,i,Y) = (IMPULS(p,i,Y) * reibung + timestep * KRAFT(p,i,Y)) * eins_d_reib * (restrictions + sort)->y;
#ifndef TWOD
      IMPULS(p,i,Z) = (IMPULS(p,i,Z) * reibung + timestep * KRAFT(p,i,Z)) * eins_d_reib * (restrictions + sort)->z;
#endif

#ifdef UNIAX
      /* new angular momenta; the dot term keeps them orthogonal to the
         molecular axis */
      dot = 2.0 * SPRODN(DREH_IMPULS,p,i,ACHSE,p,i);
      DREH_IMPULS(p,i,X) = eins_d_reib_rot * ( DREH_IMPULS(p,i,X) * reibung_rot + timestep * DREH_MOMENT(p,i,X) - dot * ACHSE(p,i,X) );
      DREH_IMPULS(p,i,Y) = eins_d_reib_rot * ( DREH_IMPULS(p,i,Y) * reibung_rot + timestep * DREH_MOMENT(p,i,Y) - dot * ACHSE(p,i,Y) );
      DREH_IMPULS(p,i,Z) = eins_d_reib_rot * ( DREH_IMPULS(p,i,Z) * reibung_rot + timestep * DREH_MOMENT(p,i,Z) - dot * ACHSE(p,i,Z) );
#endif

      /* twice the new kinetic energy */
      E_kin_2 += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i);
#ifdef UNIAX
      E_rot_2 += SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i) / uniax_inert;
#endif

      /* new positions */
      tmp = timestep / MASSE(p,i);
      ORT(p,i,X) += tmp * IMPULS(p,i,X);
      ORT(p,i,Y) += tmp * IMPULS(p,i,Y);
#ifndef TWOD
      ORT(p,i,Z) += tmp * IMPULS(p,i,Z);
#endif

#ifdef RELAXINFO
      xnorm += tmp * tmp* SPRODN(IMPULS,p,i,IMPULS,p,i);
      /* determine the biggest force component */
      tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,X)),tmp_x_max2);
      tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,Y)),tmp_x_max2);
#ifndef TWOD
      tmp_x_max2 = MAX(SQR(tmp*IMPULS(p,i,Z)),tmp_x_max2);
#endif
#endif

#ifdef UNIAX
      /* new molecular axes: rotate by L x n / I and renormalize */
      cross.x = DREH_IMPULS(p,i,Y) * ACHSE(p,i,Z) - DREH_IMPULS(p,i,Z) * ACHSE(p,i,Y);
      cross.y = DREH_IMPULS(p,i,Z) * ACHSE(p,i,X) - DREH_IMPULS(p,i,X) * ACHSE(p,i,Z);
      cross.z = DREH_IMPULS(p,i,X) * ACHSE(p,i,Y) - DREH_IMPULS(p,i,Y) * ACHSE(p,i,X);
      ACHSE(p,i,X) += timestep * cross.x / uniax_inert;
      ACHSE(p,i,Y) += timestep * cross.y / uniax_inert;
      ACHSE(p,i,Z) += timestep * cross.z / uniax_inert;
      norm = SQRT( SPRODN(ACHSE,p,i,ACHSE,p,i) );
      ACHSE(p,i,X) /= norm;
      ACHSE(p,i,Y) /= norm;
      ACHSE(p,i,Z) /= norm;
#endif

#ifdef STRESS_TENS
      if (do_press_calc) {
        /* kinetic contribution to the per-atom stress tensor */
        PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i);
        PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i);
#ifndef TWOD
        PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i);
        PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i);
        PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i);
#endif
        PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i);
      }
#endif
    }
  }

  /* mean of old and new (twice the) kinetic energies */
#ifdef UNIAX
  tot_kin_energy = ( E_kin_1 + E_kin_2 + E_rot_1 + E_rot_2 ) / 4.0;
#else
  tot_kin_energy = ( E_kin_1 + E_kin_2 ) / 4.0;
#endif

#ifdef MPI
  /* add up results from different CPUs */
  tmpvec1[0] = tot_kin_energy;
  tmpvec1[1] = E_kin_2;
  tmpvec1[2] = E_rot_2;
  tmpvec1[3] = fnorm;
  tmpvec1[4] = omega_E;
  tmpvec1[5] = xnorm;
  MPI_Allreduce( tmpvec1, tmpvec2, 6, REAL, MPI_SUM, cpugrid);
  tot_kin_energy = tmpvec2[0];
  E_kin_2        = tmpvec2[1];
  E_rot_2        = tmpvec2[2];
  fnorm          = tmpvec2[3];
  omega_E        = tmpvec2[4];
  xnorm          = tmpvec2[5];
#ifdef FNORM
  MPI_Allreduce( &tmp_f_max2, &f_max2, 1, REAL, MPI_MAX, cpugrid);
#endif
#ifdef RELAXINFO
  MPI_Allreduce( &tmp_x_max2, &x_max2, 1, REAL, MPI_MAX, cpugrid);
#endif
#else
#ifdef FNORM
  f_max2 = tmp_f_max2;
#endif
#ifdef RELAXINFO
  x_max2 = tmp_x_max2;
#endif
#endif /* MPI */

  /* time evolution of constraints (Nose-Hoover friction variable) */
  ttt  = nactive * temperature;
  eta += timestep * (E_kin_2 / ttt - 1.0) * isq_tau_eta;
#ifdef UNIAX
  ttt = nactive_rot * temperature;
  eta_rot += timestep * (E_rot_2 / ttt - 1.0) * isq_tau_eta_rot;
#endif
}

#else

void move_atoms_nvt(void)
{
  if (myid==0) error("the chosen ensemble NVT is not supported by this binary");
}

#endif

/*****************************************************************************
*
* NVT Integrator with Nose Hoover Thermostat and some shearing (?)
*
*****************************************************************************/

#ifdef SLLOD

void move_atoms_sllod(void)
{
  int k;
  real tmpvec1[4], tmpvec2[4], ttt;
  real E_kin_1 = 0.0, E_kin_2 = 0.0;
  vektor reibung, eins_d_reib;
  real E_rot_1 = 0.0, E_rot_2 = 0.0;
  real tmp_f_max2=0.0;
#ifdef UNIAX
  real reibung_rot, eins_d_reib_rot;
#endif

  fnorm = 0.0;

  /* per-direction friction factors: thermostat eta plus the shear rates */
#ifdef TWOD
  reibung.x     = 1.0 - (eta+shear_rate.x) * timestep / 2.0;
  eins_d_reib.x = 1.0 / (1.0 + (eta+shear_rate.x) * timestep / 2.0);
  reibung.y     = 1.0 - (eta+shear_rate.y) * timestep / 2.0;
  eins_d_reib.y = 1.0 / (1.0 + (eta+shear_rate.y) * timestep / 2.0);
#else
  reibung.x     = 1.0 - (eta+shear_rate.z+shear_rate2.y) * timestep / 2.0;
  eins_d_reib.x = 1.0 / (1.0 + (eta+shear_rate.z+shear_rate2.y) * timestep / 2.0);
  reibung.y     = 1.0 - (eta+shear_rate.x+shear_rate2.z) * timestep / 2.0;
  eins_d_reib.y = 1.0 / (1.0 + (eta+shear_rate.x+shear_rate2.z) * timestep / 2.0);
  reibung.z     = 1.0 - (eta+shear_rate.y+shear_rate2.x) * timestep / 2.0;
  eins_d_reib.z = 1.0 / (1.0 + (eta+shear_rate.y+shear_rate2.x) * timestep / 2.0);
#endif
#ifdef UNIAX reibung_rot = 1.0 - eta_rot * timestep / 2.0; eins_d_reib_rot = 1.0 / (1.0 + eta_rot * timestep / 2.0); #endif #ifdef _OPENMP #pragma omp parallel for reduction(+:E_kin_1,E_kin_2,E_rot_1,E_rot_2,fnorm) #endif for (k=0; k<NCELLS; ++k) { int i; int sort; cell *p; real tmp; #ifdef UNIAX real dot, norm ; vektor cross ; #endif p = CELLPTR(k); for (i=0; i<p->n; ++i) { /* twice the old kinetic energy */ E_kin_1 += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef UNIAX E_rot_1 += SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i) / uniax_inert; #endif sort = VSORTE(p,i); #ifdef FBC /* give virtual particles their extra force */ KRAFT(p,i,X) += (fbc_forces + sort)->x; KRAFT(p,i,Y) += (fbc_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (fbc_forces + sort)->z; #endif #endif KRAFT(p,i,X) *= (restrictions + sort)->x; KRAFT(p,i,Y) *= (restrictions + sort)->y; #ifndef TWOD KRAFT(p,i,Z) *= (restrictions + sort)->z; #endif #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); #ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif IMPULS(p,i,X) = (IMPULS(p,i,X) * reibung.x + timestep * KRAFT(p,i,X)) * eins_d_reib.x * (restrictions + sort)->x; IMPULS(p,i,Y) = (IMPULS(p,i,Y) * reibung.y + timestep * KRAFT(p,i,Y)) * eins_d_reib.y * (restrictions + sort)->y; #ifndef TWOD IMPULS(p,i,Z) = (IMPULS(p,i,Z) * reibung.z + timestep * KRAFT(p,i,Z)) * eins_d_reib.z * (restrictions + sort)->z; #endif #ifdef UNIAX /* new angular momenta */ dot = 2.0 * SPRODN(DREH_IMPULS,p,i,ACHSE,p,i); DREH_IMPULS(p,i,X) = eins_d_reib_rot * ( DREH_IMPULS(p,i,X) * reibung_rot + timestep * DREH_MOMENT(p,i,X) - dot * ACHSE(p,i,X) ); DREH_IMPULS(p,i,Y) = eins_d_reib_rot * ( DREH_IMPULS(p,i,Y) * reibung_rot + timestep * DREH_MOMENT(p,i,Y) - dot * ACHSE(p,i,Y) ); DREH_IMPULS(p,i,Z) = eins_d_reib_rot * ( DREH_IMPULS(p,i,Z) * reibung_rot + timestep * 
DREH_MOMENT(p,i,Z) - dot * ACHSE(p,i,Z) ); #endif /* twice the new kinetic energy */ E_kin_2 += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef UNIAX E_rot_2 += SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i) / uniax_inert; #endif /* new positions */ tmp = timestep / MASSE(p,i); ORT(p,i,X) += tmp * IMPULS(p,i,X); ORT(p,i,Y) += tmp * IMPULS(p,i,Y); #ifndef TWOD ORT(p,i,Z) += tmp * IMPULS(p,i,Z); /* sllod specific */ ORT(p,i,X) += shear_rate.z * ORT(p,i,Y); ORT(p,i,X) += shear_rate2.y * ORT(p,i,Z); ORT(p,i,Y) += shear_rate.x * ORT(p,i,Z); ORT(p,i,Y) += shear_rate2.z * ORT(p,i,X); ORT(p,i,Z) += shear_rate.y * ORT(p,i,X); ORT(p,i,Z) += shear_rate2.x * ORT(p,i,Y); #else ORT(p,i,X) += shear_rate.x * ORT(p,i,Y); ORT(p,i,Y) += shear_rate.y * ORT(p,i,X); #endif #ifdef UNIAX cross.x = DREH_IMPULS(p,i,Y) * ACHSE(p,i,Z) - DREH_IMPULS(p,i,Z) * ACHSE(p,i,Y); cross.y = DREH_IMPULS(p,i,Z) * ACHSE(p,i,X) - DREH_IMPULS(p,i,X) * ACHSE(p,i,Z); cross.z = DREH_IMPULS(p,i,X) * ACHSE(p,i,Y) - DREH_IMPULS(p,i,Y) * ACHSE(p,i,X); ACHSE(p,i,X) += timestep * cross.x / uniax_inert; ACHSE(p,i,Y) += timestep * cross.y / uniax_inert; ACHSE(p,i,Z) += timestep * cross.z / uniax_inert; norm = SQRT( SPRODN(ACHSE,p,i,ACHSE,p,i) ); ACHSE(p,i,X) /= norm; ACHSE(p,i,Y) /= norm; ACHSE(p,i,Z) /= norm; #endif #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); } #endif } } #ifdef UNIAX tot_kin_energy = ( E_kin_1 + E_kin_2 + E_rot_1 + E_rot_2 ) / 4.0; #else tot_kin_energy = ( E_kin_1 + E_kin_2 ) / 4.0; #endif #ifdef MPI /* add up results from different CPUs */ tmpvec1[0] = tot_kin_energy; tmpvec1[1] = E_kin_2; tmpvec1[2] = 
E_rot_2; tmpvec1[3] = fnorm; MPI_Allreduce( tmpvec1, tmpvec2, 4, REAL, MPI_SUM, cpugrid); tot_kin_energy = tmpvec2[0]; E_kin_2 = tmpvec2[1]; E_rot_2 = tmpvec2[2]; fnorm = tmpvec2[3]; #ifdef FNORM MPI_Allreduce( &tmp_f_max2, &f_max2, 1, REAL, MPI_MAX, cpugrid); #endif #else #ifdef FNORM f_max2 = tmp_f_max2; #endif #endif /* adjusting the box */ #ifdef TWOD box_x.y += shear_rate.y*box_y.y; box_y.x += shear_rate.x*box_x.x; #else box_y.x += shear_rate.z * box_y.y; box_z.x += shear_rate2.y * box_z.z; box_z.y += shear_rate.x * box_z.z; box_x.y += shear_rate2.z * box_x.x; box_x.z += shear_rate.y * box_x.x; box_y.z += shear_rate2.x * box_y.y; #endif make_box(); /* time evolution of constraints */ ttt = nactive * temperature; eta += timestep * (E_kin_2 / ttt - 1.0) * isq_tau_eta; #ifdef UNIAX ttt = nactive_rot * temperature; eta_rot += timestep * (E_rot_2 / ttt - 1.0) * isq_tau_eta_rot; #endif } #else void move_atoms_sllod(void) { if (myid==0) error("the chosen ensemble SLLOD is not supported by this binary"); } #endif #ifdef NPT /****************************************************************************** * * compute initial dynamical pressure * ******************************************************************************/ void calc_dyn_pressure(void) { int k; real tmpvec1[5], tmpvec2[5]; /* initialize data */ dyn_stress_x = 0.0; dyn_stress_y = 0.0; dyn_stress_z = 0.0; Ekin_old = 0.0; Erot_old = 0.0; /* loop over all cells */ #ifdef _OPENMP #pragma omp parallel for reduction(+:dyn_stress_x,dyn_stress_y,dyn_stress_z,Ekin_old,Erot_old) #endif for (k=0; k<NCELLS; ++k) { int i; cell *p; real tmp; p = CELLPTR(k); /* loop over atoms in cell */ for (i=0; i<p->n; ++i) { tmp = 1.0 / MASSE(p,i); dyn_stress_x += IMPULS(p,i,X) * IMPULS(p,i,X) * tmp; dyn_stress_y += IMPULS(p,i,Y) * IMPULS(p,i,Y) * tmp; #ifndef TWOD dyn_stress_z += IMPULS(p,i,Z) * IMPULS(p,i,Z) * tmp; #endif #ifdef UNIAX Erot_old += SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i) / uniax_inert; #endif } } /* twice the 
kinetic energy */ Ekin_old = dyn_stress_x + dyn_stress_y; #ifndef TWOD Ekin_old += dyn_stress_z; #endif #ifdef MPI /* add up results from different CPUs */ tmpvec1[0] = dyn_stress_x; tmpvec1[1] = dyn_stress_y; tmpvec1[2] = dyn_stress_z; tmpvec1[3] = Ekin_old; tmpvec1[4] = Erot_old; MPI_Allreduce( tmpvec1, tmpvec2, 5, REAL, MPI_SUM, cpugrid); dyn_stress_x = tmpvec2[0]; dyn_stress_y = tmpvec2[1]; dyn_stress_z = tmpvec2[2]; Ekin_old = tmpvec2[3]; Erot_old = tmpvec2[4]; #endif } #endif /* NPT */ /****************************************************************************** * * NPT Integrator with Nose Hoover Thermostat * ******************************************************************************/ #ifdef NPT_iso void move_atoms_npt_iso(void) { int k; real Ekin_new = 0.0, Erot_new = 0.0; real pfric, pifric, rfric, rifric; real tmpvec1[5], tmpvec2[5], ttt; real reib, ireib; real tmp_f_max2=0.0; static real d_pressure; PxF = 0.0; #ifdef RELAXINFO real tmp_x_max2=0.0; real tmppnorm, tmpfnorm,tmportx,tmporty,tmportz; xnorm = 0.0; pnorm = 0.0; #endif if (steps == steps_min) { calc_dyn_pressure(); if (isq_tau_xi==0.0) xi.x = 0.0; } fnorm = 0.0; omega_E = 0.0; #ifdef UNIAX pressure = (0.6 * (Ekin_old + Erot_old) + virial) / (DIM * volume); #else pressure = (Ekin_old + virial) / (DIM * volume) ; #endif /* time evolution of xi */ xi_old.x = xi.x; xi.x += timestep * (pressure-pressure_ext.x) * volume * isq_tau_xi / nactive; /* some constants used later on */ pfric = 1.0 - (xi_old.x + eta) * timestep / 2.0; pifric = 1.0 / (1.0 + (xi.x + eta) * timestep / 2.0); rfric = 1.0 + (xi.x ) * timestep / 2.0; rifric = 1.0 / (1.0 - (xi.x ) * timestep / 2.0); #ifdef UNIAX reib = 1.0 - eta_rot * timestep / 2.0; ireib = 1.0 / (1.0 + eta_rot * timestep / 2.0); #endif /* loop over all cells */ #ifdef _OPENMP #pragma omp parallel for reduction(+:Ekin_new,Erot_new,fnorm,omega_E) #endif for (k=0; k<NCELLS; ++k) { int i, j; cell *p; real tmp; #ifdef UNIAX real dot, norm ; vektor cross ; #endif p = 
CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif IMPULS(p,i+j,X) = IMPULS(p,i,X); IMPULS(p,i+j,Y) = IMPULS(p,i,Y); #ifndef TWOD IMPULS(p,i+j,Z) = IMPULS(p,i,Z); #endif } #endif /* CLONE */ for (i=0; i<p->n; ++i) { #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); #ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif #ifdef EINSTEIN omega_E += SPRODN(KRAFT,p,i,KRAFT,p,i) / MASSE(p,i); #endif #ifdef RELAXINFO PxF += SPRODN(IMPULS,p,i,KRAFT,p,i)/MASSE(p,i); pnorm += SPRODN(IMPULS,p,i,IMPULS,p,i)/MASSE(p,i)/MASSE(p,i); #endif /* new momenta */ IMPULS(p,i,X) = (pfric*IMPULS(p,i,X)+timestep*KRAFT(p,i,X))*pifric; IMPULS(p,i,Y) = (pfric*IMPULS(p,i,Y)+timestep*KRAFT(p,i,Y))*pifric; #ifndef TWOD IMPULS(p,i,Z) = (pfric*IMPULS(p,i,Z)+timestep*KRAFT(p,i,Z))*pifric; #endif #ifdef UNIAX /* new angular momenta */ dot = 2.0 * SPRODN(DREH_IMPULS,p,i,ACHSE,p,i); DREH_IMPULS(p,i,X) = ireib * ( DREH_IMPULS(p,i,X) * reib + timestep * DREH_MOMENT(p,i,X) - dot * ACHSE(p,i,X) ); DREH_IMPULS(p,i,Y) = ireib * ( DREH_IMPULS(p,i,Y) * reib + timestep * DREH_MOMENT(p,i,Y) - dot * ACHSE(p,i,Y) ); DREH_IMPULS(p,i,Z) = ireib * ( DREH_IMPULS(p,i,Z) * reib + timestep * DREH_MOMENT(p,i,Z) - dot * ACHSE(p,i,Z) ); #endif /* twice the new kinetic energy */ Ekin_new += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef UNIAX Erot_new += SPRODN(DREH_IMPULS,p,i,DREH_IMPULS,p,i) / uniax_inert; #endif /* new positions */ #ifdef RELAXINFO tmportx=ORT(p,i,X); tmporty=ORT(p,i,Y); tmportz=ORT(p,i,Z); #endif tmp = timestep / MASSE(p,i); ORT(p,i,X) = (rfric * ORT(p,i,X) + IMPULS(p,i,X) * tmp) * rifric; ORT(p,i,Y) = (rfric * ORT(p,i,Y) + IMPULS(p,i,Y) * tmp) * rifric; #ifndef TWOD ORT(p,i,Z) = (rfric * ORT(p,i,Z) 
+ IMPULS(p,i,Z) * tmp) * rifric; #endif #ifdef RELAXINFO xnorm += SQR(ORT(p,i,X)-tmportx) + SQR(ORT(p,i,Y)-tmporty)+SQR(ORT(p,i,Z)-tmportz); /* determine the biggest displacement component */ tmp_x_max2 = MAX( SQR(ORT(p,i,X)-tmportx),tmp_x_max2); tmp_x_max2 = MAX( SQR(ORT(p,i,Y)-tmporty),tmp_x_max2); #ifndef TWOD tmp_x_max2 = MAX( SQR(ORT(p,i,Z)-tmportz),tmp_x_max2); #endif #endif #ifdef UNIAX /* new molecular axes */ cross.x = DREH_IMPULS(p,i,Y) * ACHSE(p,i,Z) - DREH_IMPULS(p,i,Z) * ACHSE(p,i,Y); cross.y = DREH_IMPULS(p,i,Z) * ACHSE(p,i,X) - DREH_IMPULS(p,i,X) * ACHSE(p,i,Z); cross.z = DREH_IMPULS(p,i,X) * ACHSE(p,i,Y) - DREH_IMPULS(p,i,Y) * ACHSE(p,i,X); ACHSE(p,i,X) += timestep * cross.x / uniax_inert; ACHSE(p,i,Y) += timestep * cross.y / uniax_inert; ACHSE(p,i,Z) += timestep * cross.z / uniax_inert; norm = SQRT( SPRODN(ACHSE,p,i,ACHSE,p,i) ); ACHSE(p,i,X) /= norm; ACHSE(p,i,Y) /= norm; ACHSE(p,i,Z) /= norm; #endif #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); } #endif } } #ifdef MPI /* add up results from all CPUs */ tmpvec1[0] = Ekin_new; tmpvec1[1] = Erot_new; tmpvec1[2] = fnorm; tmpvec1[3] = omega_E; tmpvec1[4] = PxF; MPI_Allreduce( tmpvec1, tmpvec2, 5, REAL, MPI_SUM, cpugrid); Ekin_new = tmpvec2[0]; Erot_new = tmpvec2[1]; fnorm = tmpvec2[2]; omega_E = tmpvec2[3]; PxF = tmpvec2[4]; #ifdef FNORM MPI_Allreduce( &tmp_f_max2, &f_max2, 1, REAL, MPI_MAX, cpugrid); #endif #ifdef RELAXINFO MPI_Allreduce( &tmp_x_max2, &x_max2, 1, REAL, MPI_MAX, cpugrid); #endif #else #ifdef FNORM f_max2 = tmp_f_max2; #endif #ifdef RELAXINFO x_max2=tmp_x_max2; #endif #endif #ifdef UNIAX 
tot_kin_energy = ( Ekin_old + Ekin_new + Erot_old + Erot_new) / 4.0; #else tot_kin_energy = ( Ekin_old + Ekin_new ) / 4.0; #endif /* time evolution of eta */ ttt = nactive * temperature; eta += timestep * (Ekin_new / ttt - 1.0) * isq_tau_eta; Ekin_old = Ekin_new; #ifdef UNIAX ttt = nactive_rot * temperature; eta_rot += timestep * (Erot_new / ttt - 1.0) * isq_tau_eta_rot; Erot_old = Erot_new; #endif /* time evolution of box size */ ttt = (1.0 + xi.x * timestep / 2.0) / (1.0 - xi.x * timestep / 2.0); if (ttt<0) error("box size has become negative!"); box_x.x *= ttt; box_x.y *= ttt; box_y.x *= ttt; box_y.y *= ttt; #ifndef TWOD box_x.z *= ttt; box_y.z *= ttt; box_z.x *= ttt; box_z.y *= ttt; box_z.z *= ttt; #endif make_box(); /* increment external pressure */ if (steps == steps_min) { if (use_curr_pressure==1) { pressure_ext.x = pressure; use_curr_pressure = 0; } d_pressure = (pressure_end.x - pressure_ext.x) / (steps_max - steps_min); } pressure_ext.x += d_pressure; } #else void move_atoms_npt_iso(void) { if (myid==0) error("the chosen ensemble NPT_ISO is not supported by this binary"); } #endif /****************************************************************************** * * NPT Integrator with Nose Hoover Thermostat * ******************************************************************************/ #ifdef NPT_axial void move_atoms_npt_axial(void) { int k, sort; real tmp_f_max2=0.0; real Ekin_new = 0.0, ttt, tmpvec1[6], tmpvec2[6]; vektor pfric, pifric, rfric, rifric, tvec; static vektor d_pressure; if (steps == steps_min) { calc_dyn_pressure(); if (isq_tau_xi==0.0) { xi.x = 0.0; xi.y = 0.0; #ifndef TWOD xi.z = 0.0; #endif } xi.x *= relax_dirs.x; xi.y *= relax_dirs.y; #ifndef TWOD xi.z *= relax_dirs.z; #endif } fnorm = 0.0; omega_E = 0.0; stress_x = (dyn_stress_x + vir_xx) / volume; dyn_stress_x = 0.0; stress_y = (dyn_stress_y + vir_yy) / volume; dyn_stress_y = 0.0; #ifndef TWOD stress_z = (dyn_stress_z + vir_zz) / volume; dyn_stress_z = 0.0; #endif /* time evolution 
of xi */ ttt = timestep * volume * isq_tau_xi / nactive; xi_old.x = xi.x; xi.x += ttt * (stress_x - pressure_ext.x) * relax_dirs.x; xi_old.y = xi.y; xi.y += ttt * (stress_y - pressure_ext.y) * relax_dirs.y; #ifndef TWOD xi_old.z = xi.z; xi.z += ttt * (stress_z - pressure_ext.z) * relax_dirs.z; #endif /* some constants used later on */ pfric.x = 1.0 - (xi_old.x + eta) * timestep / 2.0; pifric.x = 1.0 / (1.0 + (xi.x + eta) * timestep / 2.0); rfric.x = 1.0 + (xi.x ) * timestep / 2.0; rifric.x = 1.0 / (1.0 - (xi.x ) * timestep / 2.0); pfric.y = 1.0 - (xi_old.y + eta) * timestep / 2.0; pifric.y = 1.0 / (1.0 + (xi.y + eta) * timestep / 2.0); rfric.y = 1.0 + (xi.y ) * timestep / 2.0; rifric.y = 1.0 / (1.0 - (xi.y ) * timestep / 2.0); #ifndef TWOD pfric.z = 1.0 - (xi_old.z + eta) * timestep / 2.0; pifric.z = 1.0 / (1.0 + (xi.z + eta) * timestep / 2.0); rfric.z = 1.0 + (xi.z ) * timestep / 2.0; rifric.z = 1.0 / (1.0 - (xi.z ) * timestep / 2.0); #endif /* loop over all cells */ #ifdef _OPENMP #pragma omp parallel for reduction(+:Ekin,dyn_stress_x,dyn_stress_y,dyn_stress_z,fnorm,omega_E) #endif for (k=0; k<NCELLS; ++k) { int i, sort; cell *p; real tmp; p = CELLPTR(k); /* loop over atoms in cell */ for (i=0; i<p->n; ++i) { tmp = 1.0 / MASSE(p,i); #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); #ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif #ifdef EINSTEIN omega_E += SPRODN(KRAFT,p,i,KRAFT,p,i) * tmp; #endif #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) * tmp; PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) * tmp; #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) * tmp; PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) * tmp; PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) * tmp; #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) * tmp; } 
#endif /* new momenta */ IMPULS(p,i,X) = (pfric.x * IMPULS(p,i,X) + timestep * KRAFT(p,i,X)) * pifric.x; IMPULS(p,i,Y) = (pfric.y * IMPULS(p,i,Y) + timestep * KRAFT(p,i,Y)) * pifric.y; #ifndef TWOD IMPULS(p,i,Z) = (pfric.z * IMPULS(p,i,Z) + timestep * KRAFT(p,i,Z)) * pifric.z; #endif sort = VSORTE(p,i); /* and set their force (->momentum) in restricted directions to 0 */ IMPULS(p,i,X) *= (restrictions + sort)->x; IMPULS(p,i,Y) *= (restrictions + sort)->y; #ifndef TWOD IMPULS(p,i,Z) *= (restrictions + sort)->z; #endif /* new stress tensor (dynamic part only) */ dyn_stress_x += IMPULS(p,i,X) * IMPULS(p,i,X) * tmp; dyn_stress_y += IMPULS(p,i,Y) * IMPULS(p,i,Y) * tmp; #ifndef TWOD dyn_stress_z += IMPULS(p,i,Z) * IMPULS(p,i,Z) * tmp; #endif /* twice the new kinetic energy */ Ekin_new += SPRODN(IMPULS,p,i,IMPULS,p,i) * tmp; /* new positions */ tmp *= timestep; ORT(p,i,X) = (rfric.x * ORT(p,i,X) + IMPULS(p,i,X) * tmp) * rifric.x; ORT(p,i,Y) = (rfric.y * ORT(p,i,Y) + IMPULS(p,i,Y) * tmp) * rifric.y; #ifndef TWOD ORT(p,i,Z) = (rfric.z * ORT(p,i,Z) + IMPULS(p,i,Z) * tmp) * rifric.z; #endif } } #ifdef MPI /* add up results from different CPUs */ tmpvec1[0] = Ekin_new; tmpvec1[1] = fnorm; tmpvec1[2] = dyn_stress_x; tmpvec1[3] = dyn_stress_y; tmpvec1[4] = dyn_stress_z; tmpvec1[5] = omega_E; MPI_Allreduce( tmpvec1, tmpvec2, 6, REAL, MPI_SUM, cpugrid); Ekin_new = tmpvec2[0]; fnorm = tmpvec2[1]; dyn_stress_x = tmpvec2[2]; dyn_stress_y = tmpvec2[3]; dyn_stress_z = tmpvec2[4]; omega_E = tmpvec2[5]; #ifdef FNORM MPI_Allreduce( &tmp_f_max2, &f_max2, 1, REAL, MPI_MAX, cpugrid); #endif #else #ifdef FNORM f_max2 = tmp_f_max2; #endif #endif /* time evolution of eta */ tot_kin_energy = ( Ekin_old + Ekin_new ) / 4.0; ttt = nactive * temperature; eta += timestep * (Ekin_new / ttt - 1.0) * isq_tau_eta; Ekin_old = Ekin_new; /* time evolution of box size */ tvec.x = (1.0 + xi.x * timestep / 2.0) / (1.0 - xi.x * timestep / 2.0); tvec.y = (1.0 + xi.y * timestep / 2.0) / (1.0 - xi.y * timestep / 
2.0); if ((tvec.x<0) || (tvec.y<0)) error("box size has become negative!"); box_x.x *= tvec.x; box_x.y *= tvec.x; box_y.x *= tvec.y; box_y.y *= tvec.y; #ifndef TWOD tvec.z = (1.0 + xi.z * timestep / 2.0) / (1.0 - xi.z * timestep / 2.0); if (tvec.z<0) error("box size has become negative!"); box_x.z *= tvec.x; box_y.z *= tvec.y; box_z.x *= tvec.z; box_z.y *= tvec.z; box_z.z *= tvec.z; #endif make_box(); /* increment external pressure */ if (steps == steps_min) { if (use_curr_pressure==1) { pressure_ext.x = stress_x; pressure_ext.y = stress_y; #ifndef TWOD pressure_ext.z = stress_z; #endif use_curr_pressure = 0; } d_pressure.x = (pressure_end.x-pressure_ext.x) / (steps_max-steps_min); d_pressure.y = (pressure_end.y-pressure_ext.y) / (steps_max-steps_min); #ifndef TWOD d_pressure.z = (pressure_end.z-pressure_ext.z) / (steps_max-steps_min); #endif } pressure_ext.x += d_pressure.x; pressure_ext.y += d_pressure.y; #ifndef TWOD pressure_ext.z += d_pressure.z; #endif } #else void move_atoms_npt_axial(void) { if (myid==0) error("the chosen ensemble NPT_AXIAL is not supported by this binary"); } #endif /***************************************************************************** * * NVE Integrator with stadium damping and fixed borders * for fracture studies * *****************************************************************************/ #ifdef FRAC void move_atoms_frac(void) { int k; real tmpvec1[6], tmpvec2[6], ttt; real tmp_f_max2=0.0; real E_kin_1 = 0.0, E_kin_2 = 0.0; real E_kin_damp1 = 0.0, E_kin_damp2 = 0.0; real E_kin_stadium1 = 0.0, E_kin_stadium2 = 0.0; real reibung, reibung_y, eins_d_reib, eins_d_reib_y; real epsilontmp, eins_d_epsilontmp; real tmp, f; /* stadium function: the bath tub !!!!*/ fnorm = 0.0; sum_f = 0.0; n_stadium = 0; if(expansionmode==1) dotepsilon = dotepsilon0 / (1.0 + dotepsilon0 * steps * timestep); /* loop over all cells */ #ifdef _OPENMP #pragma omp parallel for 
reduction(+:E_kin_1,E_kin_2,E_kin_damp1,E_kin_damp2,E_kin_stadium1,E_kin_stadium2,sum_f,n_stadium,fnorm) #endif for (k=0; k<NCELLS; ++k){ int i, j, sort; cell *p; real tmp,tmp1,tmp2; p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif IMPULS(p,i+j,X) = IMPULS(p,i,X); IMPULS(p,i+j,Y) = IMPULS(p,i,Y); #ifndef TWOD IMPULS(p,i+j,Z) = IMPULS(p,i,Z); #endif } #endif /* CLONE */ /* loop over all atoms in the cell */ for (i=0; i<p->n; ++i) { /* if half axis in x-direction is zero: global viscous damping ! */ if(stadium.x <= 0.0) { f = 1.0; } else { /* Calculate stadium function f */ tmp1 = SQR((ORT(p,i,X)-center.x)/(2.0*stadium2.x)); tmp2 = SQR((ORT(p,i,Y)-center.y)/(2.0*stadium2.y)); f = (tmp1+tmp2-SQR(stadium.x/(2.0*stadium2.x)))/\ (.25- SQR(stadium.x/(2.0*stadium2.x))); } if (f<= 0.0) { f = 0.0; n_stadium += DIM; /* what about the restrictions?? */ } if (f>1.0) f = 1.0; /* we smooth the stadium function: to get a real bath tub !*/ f = .5 * (1 + sin(-M_PI/2.0 + M_PI*f)); sort = VSORTE(p,i); /* add up f considering the restriction vector */ #ifdef TWOD sum_f+= f * ( (restrictions + sort)->x + (restrictions + sort)->y )/2.0; #else sum_f+= f * ( (restrictions + sort)->x + (restrictions + sort)->y + (restrictions + sort)->z )/3.0; #endif /* twice the old kinetic energy */ tmp = SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); E_kin_1 += tmp; if (f == 0.0) E_kin_stadium1 += tmp; if (f > 0.0) E_kin_damp1 += f * tmp; #ifdef FBC /* give virtual particles their extra force */ KRAFT(p,i,X) += (fbc_forces + sort)->x; KRAFT(p,i,Y) += (fbc_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (fbc_forces + sort)->z; #endif #endif KRAFT(p,i,X) *= (restrictions + sort)->x; KRAFT(p,i,Y) *= (restrictions + sort)->y; #ifndef TWOD KRAFT(p,i,Z) *= (restrictions + sort)->z; #endif #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest 
force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); #ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif reibung = 1.0 - gamma_damp * f * timestep / 2.0; eins_d_reib = 1.0 / (1.0 + gamma_damp * f * timestep / 2.0); reibung_y = 1.0 - (gamma_damp * f + dotepsilon) * timestep / 2.0; eins_d_reib_y = 1.0 / (1.0 + (gamma_damp * f + dotepsilon) * timestep / 2.0); /* new momenta */ IMPULS(p,i,X) = (IMPULS(p,i,X) * reibung + timestep * KRAFT(p,i,X)) * eins_d_reib * (restrictions + sort)->x; IMPULS(p,i,Y) = (IMPULS(p,i,Y) * reibung_y + timestep * KRAFT(p,i,Y)) * eins_d_reib_y * (restrictions + sort)->y; #ifndef TWOD IMPULS(p,i,Z) = (IMPULS(p,i,Z) * reibung + timestep * KRAFT(p,i,Z)) * eins_d_reib * (restrictions + sort)->z; #endif /* twice the new kinetic energy */ tmp = SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); E_kin_2 += tmp; if (f == 0.0) E_kin_stadium2 += tmp; if (f > 0.0) E_kin_damp2 += f * tmp; /* new positions */ tmp = timestep / MASSE(p,i); epsilontmp = 1.0 + dotepsilon * timestep / 2.0; eins_d_epsilontmp = 1.0 / (1.0 - dotepsilon * timestep / 2.0); ORT(p,i,X) += tmp * IMPULS(p,i,X); ORT(p,i,Y) = (tmp * IMPULS(p,i,Y) + epsilontmp * ORT(p,i,Y)) * eins_d_epsilontmp; #ifndef TWOD ORT(p,i,Z) += tmp * IMPULS(p,i,Z); #endif #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); } #endif } } tot_kin_energy = ( E_kin_1 + E_kin_2 ) / 4.0; E_kin_stadium = ( E_kin_stadium1 + E_kin_stadium2 ) / 4.0; E_kin_damp = ( E_kin_damp1 + E_kin_damp2 ) / 4.0; #ifdef MPI /* add up results from different CPUs */ 
tmpvec1[0] = tot_kin_energy; tmpvec1[1] = E_kin_stadium; tmpvec1[2] = E_kin_damp; tmpvec1[3] = E_kin_damp2; tmpvec1[4] = n_stadium; tmpvec1[5] = sum_f; MPI_Allreduce( tmpvec1, tmpvec2, 6, REAL, MPI_SUM, cpugrid); tot_kin_energy = tmpvec2[0]; E_kin_stadium = tmpvec2[1]; E_kin_damp = tmpvec2[2]; E_kin_damp2 = tmpvec2[3]; n_stadium = tmpvec2[4]; sum_f = tmpvec2[5]; #endif ttt = DIM * temperature * sum_f; /* time evolution of constraints */ /* dampingmode: 0 -> viscous damping (default); 1 -> Nose-Hoover; */ if(dampingmode == 1){ gamma_damp += timestep * (E_kin_damp2 / ttt - 1.0) * gamma_bar; } else { if( E_kin_damp2 != 0.0){ gamma_damp = (1.0 - ttt / E_kin_damp2) * gamma_bar; } else { gamma_damp = 0.0; } } } #else void move_atoms_frac(void) { if (myid==0) error("the chosen ensemble FRAC is not supported by this binary"); } #endif /***************************************************************************** * * Integrator for fracture studies with temperature gradient * * *****************************************************************************/ #ifdef FTG void move_atoms_ftg(void) { int j, k; real tmp_f_max2=0.0; static real *E_kin_1 = NULL; static real *E_kin_2 = NULL; static real *ftgtmpvec1 = NULL; static real *ftgtmpvec2 = NULL; static int *iftgtmpvec1 = NULL; static int *iftgtmpvec2 = NULL; real tmp,tmp1,tmp2; real ttt; real reibung, reibung_y, eins_d_reib, eins_d_reib_y; real epsilontmp, eins_d_epsilontmp; int slice; real gamma_tmp; /* alloc vector versions of E_kin and ftgtmpvect*/ if (NULL==E_kin_1) { E_kin_1=(real*) malloc(nslices*sizeof(real)); if (NULL==E_kin_1) error("Cannot allocate memory for E_kin_1 vector\n"); } if (NULL==E_kin_2) { E_kin_2=(real*) malloc(nslices*sizeof(real)); if (NULL==E_kin_2) error("Cannot allocate memory for E_kin_2 vector\n"); } if (NULL==ftgtmpvec1) { ftgtmpvec1=(real*) malloc(nslices*sizeof(real)); if (NULL==ftgtmpvec1) error("Cannot allocate memory for ftgtmpvec1 vector\n"); } if (NULL==ftgtmpvec2) { ftgtmpvec2=(real*) 
malloc(nslices*sizeof(real)); if (NULL==ftgtmpvec2) error("Cannot allocate memory for ftgtmpvec2 vector\n"); } if (NULL==iftgtmpvec1) { iftgtmpvec1=(int*) malloc(nslices*sizeof(int)); if (NULL==iftgtmpvec1) error("Cannot allocate memory for iftgtmpvec1 vector\n"); } if (NULL==iftgtmpvec2) { iftgtmpvec2=(int*) malloc(nslices*sizeof(int)); if (NULL==iftgtmpvec2) error("Cannot allocate memory for iftgtmpvec2 vector\n"); } for (j=0; j<nslices; j++) { *(E_kin_1 +j) = 0.0; *(E_kin_2 +j) = 0.0; *(ninslice +j) = 0; } fnorm = 0.0; if(expansionmode==1) dotepsilon = dotepsilon0 / (1.0 + dotepsilon0 * steps * timestep); /* loop over all cells */ for (k=0; k<NCELLS; ++k) { int i, j, sort; cell *p; p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif IMPULS(p,i+j,X) = IMPULS(p,i,X); IMPULS(p,i+j,Y) = IMPULS(p,i,Y); #ifndef TWOD IMPULS(p,i+j,Z) = IMPULS(p,i,Z); #endif } #endif /* CLONE */ /* loop over all atoms in cell */ for (i=0; i<p->n; ++i) { sort = VSORTE(p,i); /* calc slice */ tmp = ORT(p,i,X)/box_x.x; slice = (int) (nslices *tmp); if (slice<0) slice = 0; if (slice>=nslices) slice = nslices-1;; /* if half axis in y-direction is given: local viscous damping !!! 
*/ if(stadium.y != 0.0){ /* calc desired temperature */ temperature = Tleft + (Tright-Tleft) * (nslices*tmp - nslices_Left) /(nslices - nslices_Left - nslices_Right); if (temperature < Tleft ) temperature = Tleft; if (temperature > Tright) temperature = Tright; /* calc kinetic "temperature" for actual atom */ tmp = SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef TWOD tmp2 = ( (restrictions + sort)->x + (restrictions + sort)->y ); #else tmp2 = ( (restrictions + sort)->x + (restrictions + sort)->y + (restrictions + sort)->z ); #endif if(tmp2!=0) tmp /= (real)tmp2; /* calc damping factor form position */ gamma_tmp = (FABS(ORT(p,i,Y)-center.y) - stadium.y)/ (stadium2.y-stadium.y); if ( gamma_tmp < 0.0) gamma_tmp = 0.0; if ( gamma_tmp > 1.0) gamma_tmp = 1.0; /* smooth the gamma_tmp funktion*/ gamma_tmp = .5 * (1 + sin(-M_PI/2.0 + M_PI*gamma_tmp)); /* to share the code with the non local version we overwrite the gamma values every timestep */ *(gamma_ftg+slice) = (gamma_min + gamma_bar * gamma_tmp) * (tmp-temperature) / sqrt(SQR(tmp) + SQR(temperature/delta_ftg)); } /* add up degrees of freedom considering restriction vector */ #ifdef TWOD *(ninslice + slice) += ( (restrictions + sort)->x + (restrictions + sort)->y ); #else *(ninslice + slice) += ( (restrictions + sort)->x + (restrictions + sort)->y + (restrictions + sort)->z ); #endif /* twice the old kinetic energy */ *(E_kin_1 + slice) += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef FBC /* give virtual particles their extra force */ KRAFT(p,i,X) += (fbc_forces + sort)->x; KRAFT(p,i,Y) += (fbc_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (fbc_forces + sort)->z; #endif #endif KRAFT(p,i,X) *= (restrictions + sort)->x; KRAFT(p,i,Y) *= (restrictions + sort)->y; #ifndef TWOD KRAFT(p,i,Z) *= (restrictions + sort)->z; #endif #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); 
#ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif reibung = 1.0 - *(gamma_ftg + slice) * timestep / 2.0; eins_d_reib = 1.0 / (1.0 + *(gamma_ftg + slice) * timestep / 2.0); reibung_y = 1.0 - (*(gamma_ftg + slice) + dotepsilon) * timestep / 2.0; eins_d_reib_y = 1.0 / (1.0 + (*(gamma_ftg + slice) + dotepsilon) * timestep / 2.0); /* new momenta */ IMPULS(p,i,X) = (IMPULS(p,i,X) * reibung + timestep * KRAFT(p,i,X)) * eins_d_reib * (restrictions + sort)->x; IMPULS(p,i,Y) = (IMPULS(p,i,Y) * reibung_y + timestep * KRAFT(p,i,Y)) * eins_d_reib_y * (restrictions + sort)->y; #ifndef TWOD IMPULS(p,i,Z) = (IMPULS(p,i,Z) * reibung + timestep * KRAFT(p,i,Z)) * eins_d_reib * (restrictions + sort)->z; #endif /* twice the new kinetic energy */ *(E_kin_2 + slice) += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); /* new positions */ tmp = timestep / MASSE(p,i); epsilontmp = 1.0 + dotepsilon * timestep / 2.0; eins_d_epsilontmp = 1.0 / (1.0 - dotepsilon * timestep / 2.0); ORT(p,i,X) += tmp * IMPULS(p,i,X); ORT(p,i,Y) = (tmp * IMPULS(p,i,Y) + epsilontmp * ORT(p,i,Y)) * eins_d_epsilontmp; #ifndef TWOD ORT(p,i,Z) += tmp * IMPULS(p,i,Z); #endif #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); } #endif } } tot_kin_energy = 0.0; for (j=0; j<nslices; j++){ tot_kin_energy += ( *(E_kin_1 + j) + *(E_kin_2 + j)) / 4.0; *(E_kin_ftg+j) = ( *(E_kin_1 + j) + *(E_kin_2 + j)) / 4.0; } #ifdef DEBUG printf("%d: ", myid); for (j=0;j<nslices;j++) printf("%3.6f ", *(E_kin_2 +j)); printf("\n"); #endif #ifdef MPI /* add up results from different CPUs */ for (j=0; j<nslices; j++) *(ftgtmpvec1 + j) 
= *(E_kin_ftg + j); MPI_Allreduce( ftgtmpvec1, ftgtmpvec2, nslices, REAL, MPI_SUM, cpugrid); for (j=0; j<nslices; j++) *(E_kin_ftg + j) = *(ftgtmpvec2 + j); for (j=0; j<nslices; j++) *(ftgtmpvec1 + j) = *(E_kin_2 + j); MPI_Allreduce( ftgtmpvec1, ftgtmpvec2, nslices, REAL, MPI_SUM, cpugrid); for (j=0; j<nslices; j++) *(E_kin_2 + j) = *(ftgtmpvec2 + j); tmp1 = tot_kin_energy; MPI_Allreduce( &tmp1, &tmp2, 1, REAL, MPI_SUM, cpugrid); tot_kin_energy = tmp2; for (j=0; j<nslices; j++) *(iftgtmpvec1 +j) = *(ninslice + j); MPI_Allreduce( iftgtmpvec1, iftgtmpvec2, nslices, MPI_INT, MPI_SUM, cpugrid); for (j=0; j<nslices; j++) *(ninslice + j) = *(iftgtmpvec2 +j); #endif for (j=0; j<nslices; j++) { temperature = Tleft + (Tright-Tleft)*(j-nslices_Left+1) / (real) (nslices-nslices_Left-nslices_Right+1); if(j>=nslices-nslices_Right) temperature = Tright; if(j<nslices_Left) temperature = Tleft; ttt = temperature * *(ninslice+j); /* time evolution of constraints */ /* dampingmode: 0 -> viscous damping (default); 1 -> Nose-Hoover; */ if(0.0 == ttt){ *(gamma_ftg+j) = 0.0; } else if (dampingmode == 1) { *(gamma_ftg+j) += timestep * ( *(E_kin_2+j) / ttt - 1.0) * gamma_bar; } else if (dampingmode == 0) { *(gamma_ftg+j) = (1.0 - ttt / *(E_kin_2+j)) * gamma_bar; } } } #else void move_atoms_ftg(void) { if (myid==0) error("the chosen ensemble FTG is not supported by this binary"); } #endif /***************************************************************************** * * Integrator with local temperature (Finnis) * * *****************************************************************************/ #ifdef FINNIS void move_atoms_finnis(void) { int j, k; real E_kin_1, E_kin_2; real tmp, tmp1, tmp2; real ttt; real temperature_at; real zeta_finnis; real tmp_f_max2=0.0; fnorm = 0.0; /* loop over all cells */ for (k=0; k<NCELLS; ++k) { int i, j, sort; vektor *rest; cell *p; p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); 
KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif IMPULS(p,i+j,X) = IMPULS(p,i,X); IMPULS(p,i+j,Y) = IMPULS(p,i,Y); #ifndef TWOD IMPULS(p,i+j,Z) = IMPULS(p,i,Z); #endif } #endif /* CLONE */ /* loop over all atoms in the cell */ for (i=0; i<p->n; ++i) { sort = VSORTE(p,i); rest = restrictions + sort; /* calc kinetic "temperature" for actual atom */ tmp = SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef TWOD tmp2 = rest->x + rest->y; #else tmp2 = rest->x + rest->y + rest->z; #endif if (tmp2 != 0) tmp /= tmp2; /* to account for restricted mobilities and to avoid singularities */ temperature_at = (tmp2 !=0) ? (tmp2/3.0 * temperature) : (1e-10); /* to share the code with the non local version we overwrite the zeta values every timestep */ zeta_finnis = zeta_0 * (tmp-temperature_at) / sqrt(SQR(tmp) + SQR(temperature_at*delta_finnis)); /* twice the old kinetic energy */ E_kin_1 += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); #ifdef FBC /* give virtual particles their extra force */ KRAFT(p,i,X) += (fbc_forces + sort)->x; KRAFT(p,i,Y) += (fbc_forces + sort)->y; #ifndef TWOD KRAFT(p,i,Z) += (fbc_forces + sort)->z; #endif #endif KRAFT(p,i,X) *= rest->x; KRAFT(p,i,Y) *= rest->y; #ifndef TWOD KRAFT(p,i,Z) *= rest->z; #endif #ifdef FNORM fnorm += SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_f_max2 = MAX(SQR(KRAFT(p,i,X)),tmp_f_max2); tmp_f_max2 = MAX(SQR(KRAFT(p,i,Y)),tmp_f_max2); #ifndef TWOD tmp_f_max2 = MAX(SQR(KRAFT(p,i,Z)),tmp_f_max2); #endif #endif /* new momenta */ IMPULS(p,i,X) += (-1.0*IMPULS(p,i,X) * zeta_finnis + KRAFT(p,i,X)) * timestep * rest->x; IMPULS(p,i,Y) += (-1.0*IMPULS(p,i,Y) * zeta_finnis + KRAFT(p,i,Y)) * timestep * rest->y; #ifndef TWOD IMPULS(p,i,Z) += (-1.0*IMPULS(p,i,Z) * zeta_finnis + KRAFT(p,i,Z)) * timestep * rest->z; #endif /* twice the new kinetic energy */ E_kin_2 += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); /* new positions */ tmp = timestep / MASSE(p,i); ORT(p,i,X) += tmp * 
IMPULS(p,i,X); ORT(p,i,Y) += tmp * IMPULS(p,i,Y); #ifndef TWOD ORT(p,i,Z) += tmp * IMPULS(p,i,Z); #endif #ifdef STRESS_TENS if (do_press_calc) { PRESSTENS(p,i,xx) += IMPULS(p,i,X) * IMPULS(p,i,X) / MASSE(p,i); PRESSTENS(p,i,yy) += IMPULS(p,i,Y) * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD PRESSTENS(p,i,zz) += IMPULS(p,i,Z) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,yz) += IMPULS(p,i,Y) * IMPULS(p,i,Z) / MASSE(p,i); PRESSTENS(p,i,zx) += IMPULS(p,i,Z) * IMPULS(p,i,X) / MASSE(p,i); #endif PRESSTENS(p,i,xy) += IMPULS(p,i,X) * IMPULS(p,i,Y) / MASSE(p,i); } #endif } } tot_kin_energy = ( E_kin_1 + E_kin_2 ) / 4.0; #ifdef MPI /* add up results from different CPUs */ tmp1 = tot_kin_energy; MPI_Allreduce( &tmp1, &tmp2, 1, REAL, MPI_SUM, cpugrid); tot_kin_energy = tmp2; #endif } #else void move_atoms_finnis(void) { if (myid==0) error("the chosen ensemble FINNIS is not supported by this binary"); } #endif #ifdef STM /***************************************************************************** * * NVT Integrator with Stadium * *****************************************************************************/ void move_atoms_stm(void) { int k; real tmp_f_max2=0.0; /* we handle 2 ensembles ensindex = 0 -> NVT ;ensindex = 1 -> NVE */ int ensindex = 0; real kin_energie_1[2] = {0.0,0.0}, kin_energie_2[2] = {0.0,0.0}; real tmpvec1[5], tmpvec2[5], ttt; n_stadium = 0; /* loop over all atoms */ #ifdef _OPENMP #pragma omp parallel for reduction(+:kin_energie_1[0],kin_energie_1[1],kin_energie_2[0],kin_energie_2[2],n_stadium) #endif for (k=0; k<NCELLS; ++k) { int i; cell *p; real reibung, eins_d_reib; real tmp; vektor d; int sort=0; p = CELLPTR(k); for (i=0; i<p->n; ++i) { /* Check if outside or inside the ellipse: */ tmp = SQR((ORT(p,i,X)-center.x)/stadium.x) + SQR((ORT(p,i,Y)-center.y)/stadium.y) - 1; if (tmp <= 0) { /* We are inside the ellipse: */ reibung = 1.0; eins_d_reib = 1.0; n_stadium += DIM; ensindex = 1; } else { reibung = 1 - eta * timestep / 2.0; eins_d_reib = 1 / (1 + eta * 
timestep / 2.0); ensindex = 0; } /* twice the old kinetic energy */ kin_energie_1[ensindex] += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); /* new momenta */ sort = VSORTE(p,i); IMPULS(p,i,X) = (IMPULS(p,i,X)*reibung + timestep * KRAFT(p,i,X)) * eins_d_reib * (restrictions + sort)->x; IMPULS(p,i,Y) = (IMPULS(p,i,Y)*reibung + timestep * KRAFT(p,i,Y)) * eins_d_reib * (restrictions + sort)->y; /* twice the new kinetic energy */ kin_energie_2[ensindex] += SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); /* new positions */ tmp = timestep * MASSE(p,i); ORT(p,i,X) += tmp * IMPULS(p,i,X); ORT(p,i,Y) += tmp * IMPULS(p,i,Y); } } tot_kin_energy = (kin_energie_1[0] + kin_energie_2[0]) / 4.0; E_kin_stadium = (kin_energie_1[1] + kin_energie_2[1]) / 4.0; #ifdef MPI /* add up results from all CPUs */ tmpvec1[0] = tot_kin_energy; tmpvec1[1] = kin_energie_2[0]; tmpvec1[2] = E_kin_stadium; tmpvec1[3] = kin_energie_2[1]; tmpvec1[4] = (real)n_stadium; MPI_Allreduce( tmpvec1, tmpvec2, 5, REAL, MPI_SUM, cpugrid); tot_kin_energy = tmpvec2[0]; kin_energie_2[0] = tmpvec2[1]; E_kin_stadium = tmpvec2[2]; kin_energie_2[1] = tmpvec2[3]; n_stadium = (int)tmpvec2[4]; #endif /* Zeitentwicklung der Parameter */ ttt = (nactive - n_stadium) * temperature; eta += timestep * (kin_energie_2[0] / ttt - 1.0) * isq_tau_eta; } #else void move_atoms_stm(void) { if (myid==0) error("the chosen ensemble STM is not supported by this binary"); } #endif /****************************************************************************** * * NVX Integrator for heat conductivity (direct method) * ******************************************************************************/ #ifdef NVX void move_atoms_nvx(void) { int k, num, nhalf; real Ekin_1, Ekin_2, Ekin_right, Ekin_left, delta_E, Evec1[3], Evec2[3]; real scale, xx, rescale_left, rescale_right; Evec1[0] = 0.0; Evec1[1] = 0.0; Evec1[2] = 0.0; nhalf = hc_nlayers / 2; scale = hc_nlayers / box_x.x; delta_E = hc_heatcurr * 2 * box_y.y * box_z.z * timestep; /* loop over all 
atoms */ for (k=0; k<NCELLS; ++k) { int i; cell *p = CELLPTR(k); for (i=0; i<p->n; ++i) { /* twice the old kinetic energy */ Ekin_1 = SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); /* new momenta */ IMPULS(p,i,X) += timestep * KRAFT(p,i,X); IMPULS(p,i,Y) += timestep * KRAFT(p,i,Y); #ifndef TWOD IMPULS(p,i,Z) += timestep * KRAFT(p,i,Z); #endif /* twice the new kinetic energy */ Ekin_2 = SPRODN(IMPULS,p,i,IMPULS,p,i) / MASSE(p,i); /* new positions */ ORT(p,i,X) += timestep * IMPULS(p,i,X) / MASSE(p,i); ORT(p,i,Y) += timestep * IMPULS(p,i,Y) / MASSE(p,i); #ifndef TWOD ORT(p,i,Z) += timestep * IMPULS(p,i,Z) / MASSE(p,i); #endif Evec1[0] += (Ekin_1 + Ekin_2) / 4.0; /* kinetic energy of layers 0 and nhalf */ xx = ORT(p,i,X); if (xx<0.0) xx += box_x.x; num = (int) (scale * xx); if (num >= hc_nlayers) num -= hc_nlayers; if (num == 0 ) Evec1[1] += Ekin_2; else if (num == nhalf) Evec1[2] += Ekin_2; } } #ifdef MPI /* Add up results from all cpus */ MPI_Allreduce( Evec1, Evec2, 3, REAL, MPI_SUM, cpugrid); tot_kin_energy = Evec2[0]; Ekin_left = Evec2[1]; Ekin_right = Evec2[2]; #else tot_kin_energy = Evec1[0]; Ekin_left = Evec1[1]; Ekin_right = Evec1[2]; #endif /* rescale factors for momenta */ rescale_left = SQRT( 1.0 - delta_E / Ekin_left ); rescale_right = SQRT( 1.0 + delta_E / Ekin_right ); /* rescale the momenta */ for (k=0; k<NCELLS; ++k) { int i; cell *p = CELLPTR(k); for (i=0; i<p->n; ++i) { /* which layer? 
*/ xx = ORT(p,i,X); if (xx<0.0) xx += box_x.x; num = (int) (scale * xx); if (num >= hc_nlayers) num -= hc_nlayers; /* rescale momenta */ if (num == 0) { IMPULS(p,i,X) *= rescale_left; IMPULS(p,i,Y) *= rescale_left; #ifndef TWOD IMPULS(p,i,Z) *= rescale_left; #endif } else if (num == nhalf) { IMPULS(p,i,X) *= rescale_right; IMPULS(p,i,Y) *= rescale_right; #ifndef TWOD IMPULS(p,i,Z) *= rescale_right; #endif } } } } #else void move_atoms_nvx(void) { if (myid==0) error("the chosen ensemble NVX is not supported by this binary"); } #endif /***************************************************************************** * * Move the atoms for the Conjugated Gradient relaxator * *****************************************************************************/ #ifdef CG void move_atoms_cg(real alpha) { int k; real tmp_x_max2 = 0.0; real tmpvec1[1], tmpvec2[1]; /* loop over all cells */ xnorm=0; for (k=0; k<NCELLS; ++k) { int i, j, sort; cell *p; p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif } #endif /* CLONE */ for (i=0; i<p->n; ++i) { /* CG: move atoms in search direction for linmin */ ORT(p,i,X) = OLD_ORT(p,i,X) + alpha * CG_H(p,i,X); ORT(p,i,Y) = OLD_ORT(p,i,Y) + alpha * CG_H(p,i,Y); #ifndef TWOD ORT(p,i,Z) = OLD_ORT(p,i,Z) + alpha * CG_H(p,i,Z); #endif #ifdef RELAXINFO xnorm += alpha * alpha * SPRODN(CG_H,p,i,CG_H,p,i); /* determine the biggest force component */ tmp_x_max2 = MAX( alpha * alpha *SQR(CG_H(p,i,X)),tmp_x_max2); tmp_x_max2 = MAX( alpha * alpha *SQR(CG_H(p,i,Y)),tmp_x_max2); #ifndef TWOD tmp_x_max2 = MAX( alpha * alpha *SQR(CG_H(p,i,Z)),tmp_x_max2); #endif #endif } } #ifdef RELAXINFO #ifdef MPI tmpvec1[0] = xnorm; MPI_Allreduce( tmpvec1, tmpvec2, 1, REAL, MPI_SUM, cpugrid); xnorm = tmpvec2[0]; MPI_Allreduce( &tmp_x_max2, &x_max2, 1, REAL, MPI_MAX, cpugrid); #else x_max2 = tmp_x_max2; #endif #endif if 
((cg_infolevel>0) && (0==myid)) { printf("moveatoms, alpha %.12e , xmax %.12e\n", alpha, SQRT(x_max2)); fflush(stdout); } } #else void move_atoms_cg(real alpha) { if (myid==0) error("the chosen ensemble CG is not supported by this binary"); } #endif /* steepest descent step, needed for ACG , could also be used just to do a steepest descent */ #if defined(CG) || defined(SD) void move_atoms_sd(real alpha) { int k; real tmp_x_max2 = 0.0; real tmpvec1[1], tmpvec2[1]; /* loop over all cells */ xnorm=0; for (k=0; k<NCELLS; ++k) { int i, j, sort; cell *p; p = CELLPTR(k); #ifdef CLONE for (i=0; i<p->n; i+=nclones) for (j=1; j<nclones; j++) { KRAFT(p,i+j,X) = KRAFT(p,i,X); KRAFT(p,i+j,Y) = KRAFT(p,i,Y); #ifndef TWOD KRAFT(p,i+j,Z) = KRAFT(p,i,Z); #endif } #endif /* CLONE */ for (i=0; i<p->n; ++i) { /* CG: move atoms in force direction for linmin */ ORT(p,i,X) = OLD_ORT(p,i,X) + alpha * KRAFT(p,i,X); ORT(p,i,Y) = OLD_ORT(p,i,Y) + alpha * KRAFT(p,i,Y); #ifndef TWOD ORT(p,i,Z) = OLD_ORT(p,i,Z) + alpha * KRAFT(p,i,Z); #endif #ifdef RELAXINFO xnorm += alpha * alpha * SPRODN(KRAFT,p,i,KRAFT,p,i); /* determine the biggest force component */ tmp_x_max2 = MAX( alpha * alpha *SQR(KRAFT(p,i,X)),tmp_x_max2); tmp_x_max2 = MAX( alpha * alpha *SQR(KRAFT(p,i,Y)),tmp_x_max2); #ifndef TWOD tmp_x_max2 = MAX( alpha * alpha *SQR(KRAFT(p,i,Z)),tmp_x_max2); #endif #endif } } #ifdef RELAXINFO #ifdef MPI tmpvec1[0] = xnorm; MPI_Allreduce( tmpvec1, tmpvec2, 1, REAL, MPI_SUM, cpugrid); xnorm = tmpvec2[0]; MPI_Allreduce( &tmp_x_max2, &x_max2, 1, REAL, MPI_MAX, cpugrid); #else x_max2 = tmp_x_max2; #endif #endif if ((cg_infolevel>0) && (0==myid)) { printf("moveatoms, alpha %.12e , xmax %.12e\n", alpha, SQRT(x_max2) ); fflush(stdout); } } #else void move_atoms_sd(real alpha) { if (myid==0) error("the chosen ensemble CG or SD is not supported by this binary"); } #endif #ifdef SHOCK /***************************************************************************** * * Calculate average momentum in x direction 
 * *****************************************************************************/

/* Compute, for every atom, a locally averaged x-momentum PXAVG:
 * atoms are binned into dist_dim.x slices along x, the slice momenta are
 * averaged, and each atom gets the value linearly interpolated between
 * neighbouring slice averages. */
void calc_pxavg(void)
{
  integer *num_1, *num_2;        /* per-slice atom counts (local / reduced) */
  real *dat_1, *dat_2, scale;    /* per-slice momentum sums; bins per unit length */
  int i, k;

  /* backup if dist_ur is not set: default to the full box */
  if (0.0==dist_ur.x) {
    dist_ur.x = box_x.x;
    dist_ur.y = box_y.y;
#ifndef TWOD
    dist_ur.z = box_z.z;
#endif
  }

#ifdef MPI
  /* distinct input/output buffers are required for MPI_Allreduce */
  dat_1 = (real *) malloc( dist_dim.x * sizeof(real ) );
  num_1 = (integer *) malloc( dist_dim.x * sizeof(integer) );
  dat_2 = (real *) malloc( dist_dim.x * sizeof(real ) );
  num_2 = (integer *) malloc( dist_dim.x * sizeof(integer) );
  if ((NULL==dat_1) || (NULL==num_1) || (NULL==dat_2) || (NULL==num_2))
    error("Cannot allocate distribution data.");
#else
  /* serial case: the "_2" pointers simply alias the "_1" buffers */
  dat_1 = (real *) malloc( dist_dim.x * sizeof(real ) );
  num_1 = (integer *) malloc( dist_dim.x * sizeof(integer) );
  dat_2 = dat_1;
  num_2 = num_1;
  if ((NULL==dat_1) || (NULL==num_1))
    error("Cannot allocate distribution data.");
#endif

  /* the bins are orthogonal slices in space */
  scale = dist_dim.x / (dist_ur.x - dist_ll.x);

  /* clear distributions */
  for (i=0; i<dist_dim.x; i++) {
    dat_1[i] = 0.0;
    num_1[i] = 0;
  }

  /* loop over all atoms: accumulate momentum sum and atom count per slice */
  for (k=0; k<NCELLS; ++k) {
    cell *p = CELLPTR(k);
    for (i=0; i<p->n; ++i) {
      int n = (int) (scale * (ORT(p,i,X) - dist_ll.x));
      /* atoms outside the sampled interval are ignored */
      if ((n < 0) || (n >= dist_dim.x)) continue;
      num_1[n]++;
      dat_1[n] += IMPULS(p,i,X);
    }
  }

#ifdef MPI
  /* add up results from different CPUs */
  MPI_Allreduce( dat_1, dat_2, dist_dim.x, REAL, MPI_SUM, cpugrid);
  MPI_Allreduce( num_1, num_2, dist_dim.x, INTEGER, MPI_SUM, cpugrid);
#endif

  /* normalize distribution: momentum sum -> per-slice average */
  for (i=0; i<dist_dim.x; i++) {
    if (num_2[i] > 0) dat_2[i] /= num_2[i];
  }

  /* loop over all atoms: assign the interpolated slice average to each atom */
  for (k=0; k<NCELLS; ++k) {
    cell *p = CELLPTR(k);
    for (i=0; i<p->n; ++i) {
      /* shift by 0.5 so interpolation runs between slice centers */
      int n = (int) (scale * (ORT(p,i,X) - dist_ll.x) - 0.5);
      if (n < 0) {
        PXAVG(p,i) = dat_2[0];                 /* clamp below first center */
      }
      else if (n >= dist_dim.x-1) {
        PXAVG(p,i) = dat_2[dist_dim.x-1];      /* clamp above last center */
      }
      else if (num_2[n]>0){
        /* linear interpolation between slices n and n+1.
         * NOTE(review): chi omits the dist_ll.x offset used in the binning
         * above — looks like it assumes dist_ll.x == 0; verify. */
        real chi = (ORT(p,i,X) - n / scale) * scale;
        PXAVG(p,i) = dat_2[n] * (1-chi) + chi * dat_2[n+1];
      }
      else {
        PXAVG(p,i) = dat_2[n];                 /* empty slice: no interpolation */
      }
    }
  }

#ifdef MPI
  free(dat_1);
  free(num_1);
  free(dat_2);
  free(num_2);
#else
  /* dat_2/num_2 alias dat_1/num_1 here — must not be freed twice */
  free(dat_1);
  free(num_1);
#endif
}

#endif
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
DRB024-simdtruedep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
This one has data races due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@66:5 vs. a[i]@66:12
NOTE(review): the cited race pair suggests the dependence loop carried an
`omp simd` directive in the upstream DataRaceBench version -- it is absent
here; confirm against the original benchmark.
*/
#include <stdio.h>

int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[100], b[100];

  /* race-free parallel initialization: each iteration writes only index i */
#pragma omp parallel for private(i )
  for (i=0;i<len;i++)
  {
    a[i]=i;
    b[i]=i+1;
  }

  /* loop-carried true dependence: a[i+1] is computed from a[i] */
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+b[i];

  /* print the result so the computation is not dead code */
  for (i=0;i<len;i++)
    printf("i=%d a[%d]=%d\n",i,i,a[i]);

  return 0;
}
iwbt.c
//iwbt -- compute wet or icebult from air and dewpoint temperatures #include <stdio.h> #include <stdlib.h> #include <math.h> #include <errno.h> #include <omp.h> #include "envphys_c.h" #include "envphys.h" #define RH2O 461.5 /* Gas constant for water vapor (J/kg/K) */ #define EPS (MOL_H2O/MOL_AIR) /* Ratio of moleculr weights of water and dry air */ //#define CONVERGE 0.1 /* Convergence value */ double wetbulb( double ta, /* air tempterature (K) */ double dpt, /* dewpoint temperature (K) */ double press, /* total air pressure (Pa) */ double tol) /* wet_bulb tolerance threshold */ { int i; double ea; /* vapor pressure (Pa) */ double esat; /* saturation ea @ Ta (Pa) */ double xlh; /* latent heat of vaporization + fusion (sublimation) (J/kg) */ double xlhv; /* latent heat of vaporization *(J/kg) */ double xlhf; /* latent heat of fusion *(J/kg) */ double fu_fac; /* fudge factor for xlh stradeling 0 */ double psyc; /* Psychrometric "constant" (K/Pa) */ double dedt; /* Change in ea with temperature (Pa/K) */ double pf; /* Psychrometer value (K) */ double dpdt; /* Change in pf with temperature */ double ti; /* wet or ice bulb temperature (K) */ double ti0; /* initial value for ti */ double dti; /* closure value */ /* find latent heat of vaporization, or vaporization + fusion */ if (ta <= FREEZE) { xlhv = LH_VAP((ta + dpt) / 2.0); xlhf = LH_FUS((ta + dpt) / 2.0); xlh = xlhv + xlhf; } else if (dpt <= FREEZE) { xlhv = LH_VAP((ta + dpt) / 2.0); xlhf = LH_FUS((FREEZE + dpt) / 2.0); fu_fac = ((FREEZE - dpt) / (ta - dpt)); xlh = xlhv + (fu_fac * xlhf); } else xlh = LH_VAP((ta+dpt)/2); /* vapor pressure and saturation vapor pressure at ta */ ea = sati(dpt); esat = sati(ta); /* Psychrometric "constant" (K/Pa) */ psyc = EPS * (xlh / (CP_AIR * press)); /* solve for wet or ice bulb temperature */ dti = 1.0; i = 0; ti = ta; while (dti > tol) { ti0 = ti; if (ti != ta) esat = sati(ti); dedt = xlh * (esat / (RH2O * (ti*ti))); pf = (ti - ta) + (psyc * (esat - ea)); dpdt = 1.0 + (psyc * 
dedt); ti = ti - (pf / dpdt); dti = ti0 - ti; i++; if (i > 10){ printf("failure to converge in 10 iterations"); exit(-1); } } return(ti); } //Function to calculate the wet bult temeprature of the whole image void iwbt ( int ngrid, /* number of grid points */ double *ta, /* air temperature */ double *td, /* dew point temperature */ double *z, /* elevation */ int nthreads, /* number of threads for parrallel processing */ double tol, /* wet_bulb tolerance threshold */ double *tw) /* wet bulb temperature (return) */ { int samp; double td_p; /* dew point temperature (C) */ double tw_p; /* wet bulb temperature (C) */ double ta_p; /* air temperature (C) */ double z_p; /* elevation (m) */ double pa_p; /* air pressure (pa) */ omp_set_dynamic(0); // Explicitly disable dynamic teams omp_set_num_threads(nthreads); // Use N threads for all consecutive parallel regions #pragma omp parallel shared(ngrid, ta, td, z) private(samp, ta_p, tw_p, z_p, pa_p, td_p) { #pragma omp for for (samp=0; samp < ngrid; samp++) { // get pixel values ta_p = ta[samp]; td_p = td[samp]; z_p = z[samp]; /* set pa */ if (z_p == 0.0) { pa_p = SEA_LEVEL; } else { pa_p = HYSTAT (SEA_LEVEL, STD_AIRTMP, STD_LAPSE, (z_p / 1000.0), GRAVITY, MOL_AIR); } /* convert ta & td to Kelvin */ ta_p += FREEZE; td_p += FREEZE; if(ta_p < 0 || td_p < 0){ printf("ta or td < 0 at pixel %i", samp); exit(-1); } /* call wetbulb function & fill output buffer */ tw_p = wetbulb(ta_p, td_p, pa_p, tol); // put back in array tw[samp] = tw_p - FREEZE; } } }
7025.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(4) { /* E := A*B */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
DRB013-nowait-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
This example is extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013

Some threads may finish the for loop early and execute error = a[9]+1
while another thread may still be simultaneously executing the for
worksharing region by writing to a[9], causing data races.
Data race pair: a[i]@72:7 vs. a[9]@75:13.
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>

int main()
{
  omprace_init();
  int i,error;
  int len = 1000;
  int a[len], b=5;

  for (i=0; i<len; i++)
    a[i]= i;

#pragma omp parallel shared(b, error)
  {
    /* nowait removes the implicit barrier, so the "single" construct
       below may read a[9] while another thread is still writing it --
       this is the intended data race of the benchmark */
#pragma omp for nowait schedule(dynamic,1)
    for(i = 0; i < len; i++)
      a[i] = b + a[i]*5;

#pragma omp single
    error = a[9] + 1;
  }

  printf ("error = %d\n", error);
  omprace_fini();
  return 0;
}
pooling_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 2x2 max pooling, stride 2: each output element is the maximum of a 2x2
// window of the input channel.  Channels are processed independently in
// parallel; within a row, groups of 4 outputs (8 input columns) are handled
// by NEON assembly, with a scalar tail loop for the remainder.
// NOTE(review): assumes Mat::channel(q) returns a contiguous w*h float
// plane -- standard ncnn layout; confirm if Mat changes.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After one output row the input row pointers have advanced 2*outw
    // floats.  tailstep = (w - 2*outw) + w skips any unused trailing input
    // columns plus one whole input row, landing on the next row pair
    // (stride 2 consumes two input rows per output row).
    const int tailstep = w - 2 * outw + w;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // r0/r1 walk the two input rows feeding the current output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;              // number of 4-output NEON iterations
            int remain = outw - (nn << 2);   // leftover outputs for the scalar loop
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                // Per iteration: load 8 floats from each row, take the
                // element-wise max of the rows (fmax), then the pairwise max
                // across columns (fmaxp) to produce 4 outputs.
                asm volatile(
                    "0:                               \n"
                    "prfm   pldl1keep, [%1, #256]     \n"
                    "prfm   pldl1keep, [%2, #256]     \n"
                    "ld1    {v0.4s, v1.4s}, [%1], #32 \n"
                    "ld1    {v2.4s, v3.4s}, [%2], #32 \n"
                    "fmax   v0.4s, v0.4s, v2.4s       \n"
                    "fmax   v1.4s, v1.4s, v3.4s       \n"
                    "fmaxp  v2.4s, v0.4s, v1.4s       \n"
                    "subs   %w0, %w0, #1              \n"
                    "st1    {v2.4s}, [%3], #16        \n"
                    "bne    0b                        \n"
                    : "=r"(nn),     // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(outptr)  // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr)
                    : "cc", "memory", "v0", "v1", "v2", "v3");
            }
#else
            if (nn > 0)
            {
                // ARMv7 equivalent: vmax for the vertical max, vpmax for the
                // horizontal pairwise max, 4 outputs per iteration.
                asm volatile(
                    "0:                             \n"
                    "pld        [%1, #256]          \n"
                    "pld        [%2, #256]          \n"
                    "vld1.f32   {d0-d3}, [%1]!      \n"
                    "vld1.f32   {d4-d7}, [%2]!      \n"
                    "vmax.f32   q0, q0, q2          \n"
                    "vmax.f32   q1, q1, q3          \n"
                    "vpmax.f32  d4, d0, d1          \n"
                    "vpmax.f32  d5, d2, d3          \n"
                    "subs       %0, #1              \n"
                    "vst1.f32   {d4-d5}, [%3]!      \n"
                    "bne        0b                  \n"
                    : "=r"(nn),     // %0
                    "=r"(r0),     // %1
                    "=r"(r1),     // %2
                    "=r"(outptr)  // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr)
                    : "cc", "memory", "q0", "q1", "q2", "q3");
            }
#endif // __aarch64__
#endif // __ARM_NEON

            // Scalar tail: one 2x2 window per iteration.
            for (; remain > 0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);

                *outptr = std::max(max0, max1);

                r0 += 2;
                r1 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
axpbyMany.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(axpbyMany)(const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat & alpha, const dfloat * __restrict__ cpu_a, const dfloat & beta, dfloat * __restrict__ cpu_b){ #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) #endif for(int fld=0;fld<Nfields;fld++) { for(dlong i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat bi = cpu_b[id]; cpu_b[id] = alpha*ai + beta*bi; } } }
saturnin-linear-trail-weight-three-rounds.c
#include "saturnin-common.h"
#include "../global-common.h"
#include "../convolution.h"
#include <math.h>
#include <stdbool.h>
#include <omp.h>

#define NUMBER_OF_CANDIDATES 349
#define NUMBER_OF_ELEMENTS (1 << BOX_WIDTH)
#define DT_SIZE (NUMBER_OF_ELEMENTS*NUMBER_OF_ELEMENTS)
#define NUM_THREADS 36
#define BOUND 36

/* S-box correlation tables, indexed as [NUMBER_OF_ELEMENTS*input + output].
 * Filled once in main() before the parallel region and read-only afterwards,
 * so sharing them across OpenMP threads is safe. */
static int correlation_table_even[DT_SIZE];
static int correlation_table_odd[DT_SIZE];

/*
 * Count number of input correlations equal to v in column corresponding to output correlation b.
 * The count is returned through c (caller-initialized GMP integer).
 * index selects the S-box: even nibble positions use the "even" table, odd
 * positions the "odd" table.
 */
void count_input(mpz_t c, uint16_t b, int v, int index) {
    mpz_set_ui(c, 0);
    for (int i = 0; i < NUMBER_OF_ELEMENTS; i++) {
        if ((index % 2 == 0) && abs(correlation_table_even[NUMBER_OF_ELEMENTS*i+b]) == v) {
            mpz_add_ui(c, c, 1);
        }
        if ((index % 2 != 0) && abs(correlation_table_odd[NUMBER_OF_ELEMENTS*i+b]) == v) {
            mpz_add_ui(c, c, 1);
        }
    }
}

/*
 * Count number of output correlations equal to v in row corresponding to input correlation a.
 * Mirror image of count_input: scans a row instead of a column.
 */
void count_output(mpz_t c, uint16_t a, int v, int index) {
    mpz_set_ui(c, 0);
    for (int i = 0; i < NUMBER_OF_ELEMENTS; i++) {
        if ((index % 2 == 0) && abs(correlation_table_even[NUMBER_OF_ELEMENTS*a+i]) == v) {
            mpz_add_ui(c, c, 1);
        }
        if ((index % 2 != 0) && abs(correlation_table_odd[NUMBER_OF_ELEMENTS*a+i]) == v) {
            mpz_add_ui(c, c, 1);
        }
    }
}

/*
 * b is an encoding of a 4x4 rectangle of bits as a flat 16-bit array where (i, j) is mapped to (4*i+j).
 * This function swaps entries i and j.
 */
uint16_t swap(const uint16_t b, const unsigned long i, const unsigned long j) {
    uint16_t x = ((b >> i) ^ (b >> j)) & 1U; // XOR temporary: 1 iff bits i and j differ
    return b ^ ((x << i) | (x << j));        // flipping both positions when they differ swaps them
}

/*
 * An activity pattern is represented by a 4x4 rectangle where the position of an activity bit corresponding to a nibble with a given index
 * is as follows:
 *  3  2  1  0
 *  6  5  4  7
 *  9  8 11 10
 * 12 15 14 13
 * For example, the activity bit of a nibble with index 4 is in position (1,1) in the rectangle.
 * The actual entries consist of 0's and 1's, indicating whether a nibble is active or not.
 * In this representation a row in the rectangle corresponds to the input of mix columns and a column to
 * the input of the inverse of mix columns.
 *
 * The rectangle is stored as 16 bits, where (i, j) is mapped to (4*i+j)
 *
 * This function converts this representation to the sequential one:
 *  0  1  2  3
 *  4  5  6  7
 *  8  9 10 11
 * 12 13 14 15
 */
uint16_t convert(const uint16_t b) {
    uint16_t r = swap(b, 0, 3);
    r = swap(r, 1, 2);
    r = swap(r, 4, 6);
    r = swap(r, 8, 9);
    r = swap(r, 10, 11);
    return swap(r, 13, 15);
}

/*
 * Enumerates three-round linear trails of Saturnin with weight at most BOUND
 * and accumulates, per candidate activity pattern, a histogram of trail
 * counts by weight.  Per-pattern histograms and their total are written to
 * data/*.txt files.
 */
int main(void) {
    // We know that the number of candidate masks is NUMBER_OF_CANDIDATES, because we used the code below
    // to compute and print them.
    uint16_t candidate_masks[NUMBER_OF_CANDIDATES];

    // Consider a single slice, consisting of 16 nibbles.
    // Associated with this are 2^16 possible activity patterns. Each activity pattern is represented by a rectangle as
    // described above where rows correspond to mix columns input and columns to inverse mix columns input.
    // We step through each activity pattern.
    for (long x = 0, k = 0; x <= UINT16_MAX; x++) {
        long min_box_weight = hamming_weight16(x);
        // Consider each column
        for (long j = 0; j < 4; j++) {
            long active = 0;
            for (long i = 0; i < 4; i++) {
                if ((x & (1U << (4*i+j))) != 0) {
                    ++active;
                }
            }
            if (active != 0) {
                // Mix columns ensures at least 5 active nibbles
                min_box_weight += 5-active;
            }
        }
        // Consider each row
        for (long i = 0; i < 4; i++) {
            long active = 0;
            for (long j = 0; j < 4; j++) {
                if ((x & (1U << (4*i+j))) != 0) {
                    ++active;
                }
            }
            if (active != 0) {
                // Mix columns inverse ensures at least 5 active nibbles
                min_box_weight += 5-active;
            }
        }
        // Each active nibble contributes at least a weight of 2, so we check whether this lower bound is below
        // the upper bound that we set.
        if (min_box_weight <= BOUND/2) {
            // convert to sequential representation.
            candidate_masks[k] = convert(x);
            k++;
        }
    }

    fill_correlation_table(correlation_table_even, sbox_even, BOX_WIDTH);
    fill_correlation_table(correlation_table_odd, sbox_odd, BOX_WIDTH);

    Table_fixed *total_weight = table_fixed_create(1+BOUND);
    // One histogram per candidate mask; each is written only by the thread
    // that owns index i in the parallel loop below, so no locking is needed.
    Table_fixed *weights[NUMBER_OF_CANDIDATES];
    for (long i = 0; i < NUMBER_OF_CANDIDATES; i++) {
        weights[i] = table_fixed_create(1+BOUND);
    }

    // The number of iterations of the main loop is bounded from above by 2^9
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel for schedule(dynamic)
    // Consider each candidate mask
    for (long i = 0; i < NUMBER_OF_CANDIDATES; i++) {
        mpz_t val;
        mpz_init(val);
        uint16_t x = candidate_masks[i];
        int box_weight_a = hamming_weight16(x);
        // The number of iterations of loop is bounded from above by 2^16, since w <= 4
        // Step over all possible masks after the second Sbox-layer,
        // having box-activity pattern equal to x
        for (uint64_t a = 0; a < (uint64_t) pow(2, 4*box_weight_a); a++) {
            if (box_weight(a, box_weight_a, 4) != box_weight_a) {
                continue;
            }
            // Build the mask before the third Sbox-layer by scattering the
            // packed nibbles of a into the active positions of x.
            uint64_t state1 = 0;
            for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
                if ((x & (1U << j)) != 0) {
                    state1 |= ((a >> 4*m) & 0xf) << 4*j;
                    m++;
                }
            }
            // Build the mask after the second Sbox-layer
            state1 = permute_slice(state1);
            state1 = mix_columns_slice_transposed_inverse(state1);
            state1 = permute_slice_inverse(state1);

            // Compute the convolution of the histograms of the third Sbox-layer.
            // Slots 16..31 hold per-nibble weight histograms (weights 0, 2, 4
            // correspond to |correlation| 16, 8, 4).
            Table_fixed *bases[32];
            for (long j = 16; j < 32; j++) {
                bases[j] = table_fixed_create(5);
            }
            for (long j = 0; j < 16; j++) {
                count_output(val, (state1 >> 4*j) & 0xf, 16, j);
                table_fixed_insert_and_merge(bases[16+j], 0, val, &mpz_add);
                count_output(val, (state1 >> 4*j) & 0xf, 8, j);
                table_fixed_insert_and_merge(bases[16+j], 2, val, &mpz_add);
                count_output(val, (state1 >> 4*j) & 0xf, 4, j);
                table_fixed_insert_and_merge(bases[16+j], 4, val, &mpz_add);
            }
            Table_fixed *convolved_state1 = table_fixed_copy(bases[16]);
            for (long j = 17; j < 32; j++) {
                Table_fixed *tmp = convolve_fixed(convolved_state1, bases[j], 0, BOUND);
                table_fixed_destroy(convolved_state1);
                convolved_state1 = tmp;
            }

            // For a fixed mask AFTER the second Sbox-layer, consider all the masks BEFORE the second Sbox-layer,
            // the number of which is bounded from above by 2^12 (empirically verified)
            for (uint64_t b = 0; b < (uint64_t) pow(2, 4*box_weight_a); b++) {
                bool valid = true;
                long linear_weight_middle = 0;
                for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
                    if ((x & (1U << j)) != 0) {
                        long row_index = (b >> 4*m) & 0xf;
                        long column_index = (a >> 4*m) & 0xf;
                        long count;
                        if (j % 2 == 0) {
                            count = correlation_table_even[16*row_index+column_index];
                        } else {
                            count = correlation_table_odd[16*row_index+column_index];
                        }
                        if (count == 0) {
                            valid = false;
                            break;
                        }
                        // NOTE(review): abs() takes int while count is long; the
                        // correlations here are small (|count| <= 16) so no
                        // truncation occurs, but labs() would be cleaner.
                        linear_weight_middle += 2*(4 - (long) log2((double) abs(count)));
                        m++;
                    }
                }
                if (valid) {
                    uint64_t state0 = 0;
                    for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
                        if ((x & (1U << j)) != 0) {
                            state0 |= ((b >> 4*m) & 0xf) << 4*j;
                            m++;
                        }
                    }
                    state0 = mix_columns_slice_transposed(state0);
                    // Filter anything that will exceed the bound to save us the work of doing the convolution
                    if (box_weight_a + box_weight(state0, 16, BOX_WIDTH) + box_weight(state1, 16, BOX_WIDTH) > BOUND/2) {
                        continue;
                    }
                    // at this point, we can do the convolution
                    // use b1 and b2 to determine active sboxes and
                    // convolve
                    for (long j = 0; j < 16; j++) {
                        /* linear weight 0, 2, and 4 */
                        bases[j] = table_fixed_create(5);
                    }
                    for (long j = 0; j < 16; j++) {
                        count_input(val, (state0 >> 4*j) & 0xf, 16, j);
                        table_fixed_insert_and_merge(bases[j], 0, val, &mpz_add);
                        count_input(val, (state0 >> 4*j) & 0xf, 8, j);
                        table_fixed_insert_and_merge(bases[j], 2, val, &mpz_add);
                        count_input(val, (state0 >> 4*j) & 0xf, 4, j);
                        table_fixed_insert_and_merge(bases[j], 4, val, &mpz_add);
                    }
                    Table_fixed *convolved_state_total = table_fixed_copy(convolved_state1);
                    for (long j = 0; j < 16; j++) {
                        Table_fixed *tmp = convolve_fixed(convolved_state_total, bases[j], 0, BOUND-linear_weight_middle);
                        table_fixed_destroy(convolved_state_total);
                        convolved_state_total = tmp;
                    }
                    // NOTE(review): j is size_t while BOUND-linear_weight_middle is a
                    // signed expression; if linear_weight_middle could ever exceed
                    // BOUND the bound would wrap to a huge unsigned value.  The
                    // weight filters above appear to rule that out -- verify.
                    for (size_t j = 0; j <= BOUND-linear_weight_middle; j++) {
                        table_fixed_insert_and_merge(weights[i], j+linear_weight_middle, convolved_state_total->head[j], &mpz_add);
                    }
                    for (long j = 0; j < 16; j++) {
                        table_fixed_destroy(bases[j]);
                    }
                    table_fixed_destroy(convolved_state_total);
                }
            }
            for (long j = 16; j < 32; j++) {
                table_fixed_destroy(bases[j]);
            }
            table_fixed_destroy(convolved_state1);
        }
        mpz_clear(val);
        // Per-pattern output file, named by the (sequential) mask in hex;
        // the fixed pattern fits comfortably in 60 bytes.
        char filename[60];
        sprintf(filename, "data/saturnin-linear-trail-three-rounds_%04x.txt", x);
        table_fixed_print(filename, weights[i]);
    }

    // Serial reduction of the per-pattern histograms into the grand total.
    for (long i = 0; i <= BOUND; i++) {
        for (long j = 0; j < NUMBER_OF_CANDIDATES; j++) {
            table_fixed_insert_and_merge(total_weight, i, weights[j]->head[i], &mpz_add);
        }
    }
    table_fixed_print("data/saturnin-linear-trail-three-rounds_total.txt", total_weight);

    for (long i = 0; i < NUMBER_OF_CANDIDATES; i++) {
        table_fixed_destroy(weights[i]);
    }
    table_fixed_destroy(total_weight);
    return 0;
}
GB_unop__cos_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__cos_fc32_fc32)
// op(A') function:  GB (_unop_tran__cos_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = ccosf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex single-precision cosine)
#define GB_OP(z, x) \
    z = ccosf (x) ;

// casting (identity: A and C share the same type here)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = ccosf (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COS || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = ccosf(aij) element-wise over anz entries.  Cx and Ax may be
// aliased (the kernel only reads Ax[p] before writing Cx[p]).  Ab, when
// non-NULL, is the bitmap of A: entries with Ab[p] == 0 are skipped.
GrB_Info GB (_unop_apply__cos_fc32_fc32)
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // p is the parallel loop index; OpenMP makes it private per thread.
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ccosf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ccosf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which expands the
// GB_* macros defined above; this function just supplies the parameters.
GrB_Info GB (_unop_tran__cos_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
shape.h
/* * shape.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef SHAPE_H_ #define SHAPE_H_ #include <cstring> #include <cstdio> #include <dll.h> #include <nd4jmalloc.h> #include <templatemath.h> #include "pointercast.h" #define MAX_DIMENSION 0x7fffffff #define MAX_NUM_THREADS 1024 #define MAX_RANK 32 #define MAX_COORD 3 #define PREALLOC_SIZE 33554432 #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include <sharedmem.h> #endif #include <pairwise_util.h> namespace shape { /** * Shape information approximating * the information on an ndarray */ struct ShapeInformation { #ifdef __CUDACC__ __host__ __device__ #endif ShapeInformation(int *shape_ = nullptr, int *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0) : shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_) {} int *shape; int *stride; char order; int rank; int offset; int elementWiseStride; }; /** * Indexing information * for bounds checking */ struct CurrentIndexing { int numElementsPerThread; int blockStartingIndex; int startingThreadIndex; int endingThreadIndex; }; #ifdef __CUDACC__ __host__ __device__ #endif inline bool shapeEquals(int shape1Rank,int *shape1,int shape2Rank,int *shape2); #ifdef __CUDACC__ __host__ __device__ #endif inline bool shapeEquals(int *shapeInfo1,int *shapeInfo2); #ifdef __CUDACC__ __host__ __device__ #endif inline bool strideEquals(int shape1Rank,int *shape1,int shape2Rank,int *shape2); #ifdef __CUDACC__ __host__ __device__ #endif inline bool strideEquals(int *shapeInfo1,int *shapeInfo2); #ifdef __CUDACC__ __host__ __device__ #endif inline bool strideEquals(int *stride,int strideRank,int *stride2,int stride2Rank); #ifdef __CUDACC__ __host__ __device__ inline void traceNew(int id) { //TODO: remove this method before going to release // printf("new happened: [%i]\n", id); } #else inline void traceNew(int id) { //no-op } #endif #ifdef __CUDACC__ __host__ 
__device__ #endif inline int tadIndexForLinear(int linearIndex, int tadLength); /** * Get the shape info buffer * for the given rank and shape. */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeBuffer(int rank, int *shape); #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeBuffer(int rank, int *shape, int *buffer); /** * Get the shape info buffer * for the given rank and shape. */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeBufferFortran(int rank, int *shape); #ifdef __CUDACC__ __host__ __device__ #endif inline void doPermuteShapeBuffer(int *shapeBuffer,int *rearrange, int *tmpBuffer); #ifdef __CUDACC__ __host__ __device__ #endif inline void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange, int *tmpBuffer); #ifdef __CUDACC__ template <typename T> __device__ inline int *cuMalloc(int *buffer, long size, UnifiedSharedMemory *manager); __device__ inline int *cuMalloc(int *buffer, long size); #endif /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int * calcStridesFortran(int *shape, int rank); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* calcStrides(int *shape, int rank); /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* calcStridesFortran(int *shape, int rank, int startNum); /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* calcStrides(int *shape, int rank, int startNum); /** * @param toCopy the shape to copy * @return a copy of the original struct */ #ifdef __CUDACC__ __host__ __device__ #endif inline ShapeInformation *shapeCopy( ShapeInformation *toCopy); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ #ifdef __CUDACC__ __host__ __device__ #endif inline int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder); /** * Compute the element wise stride * for a given shape/stride configuration * @param rank the rank of the shape/stride * @param shape the shape * @param stride the stride * @param isFOrder 0 or 1 for whether the array is f * ordered or not * @return -1 if there is no element wise stride the * element wise stride of reshape(1,length) otherwise */ #ifdef __CUDACC__ __host__ __device__ #endif inline int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder, int *dimension, int dimensionLength); #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength,bool reverseCopyStride); #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength,bool reverseCopyStride, int *buffer); /** * * @param length * @param shape * @param rearrange * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *doPermuteSwap(int length, int *shape, int *rearrange); /** * In place permute swap * @param 
length * @param shape * @param rearrange */ #ifdef __CUDACC__ __host__ __device__ #endif inline void doPermuteSwap(int length, int **shape, int *rearrange); #ifdef __CUDACC__ __host__ __device__ #endif inline int *permuteShapeBuffer(int *shapeBuffer,int *rearrange); #ifdef __CUDACC__ __host__ __device__ #endif inline void permuteShapeBufferInPlace(int *shapeBuffer,int *rearrange,int *out); #ifdef __CUDACC__ __host__ __device__ #endif inline void doPermuteShapeBuffer(int *shapeBuffer,int *rearrange); #ifdef __CUDACC__ __host__ __device__ #endif inline void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange); /** * Rearrange the permute indexes * according to which dimensions are specified. * * For example, dimension is implicitly: * 0,1,2 * * If you want to do a reduce along dimensions 0 and 1, * you need to permute the indexes to be: * 2,0,1 * * which will give us the ability to ierate along an element * wise stride. */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *createPermuteIndexes(int originalRank,int *dimension,int dimensionLength); /** * Get the ordering for the device * @param length * @param shape * @param stride * @param elementStride * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline char getOrder(int length, int *shape, int *stride, int elementStride); /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int checkArrangeArray(int *arr, int arrLength, int shapeLength); /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ #ifdef __CUDACC__ __host__ __device__ #endif inline void permute(ShapeInformation **info, int *rearrange, int rank); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of 
cthe shape */ #ifdef __CUDACC__ __host__ __device__ #endif inline int isVector(int *shape, int rank); /** * When 1 dimension is the whole length of the * array */ #ifdef __CUDACC__ __host__ __device__ #endif inline int oneDimEqualToLength(int *shape, int rank); #ifdef __CUDACC__ __host__ __device__ #endif inline int oneDimEqualToLength(int *shapeInfo); #ifdef __CUDACC__ __host__ __device__ #endif inline int isVector(int *shapeInfo); /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ #ifdef __CUDACC__ __host__ __device__ #endif inline int isMatrix(int *shape, int rank); #ifdef __CUDACC__ __host__ __device__ #endif inline int isMatrix(int *shapeInfo); /** * Returns the shape portion of an information * buffer */ #ifdef __CUDACC__ __host__ __device__ #endif int *shapeOf(int *buffer); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ #ifdef __CUDACC__ __host__ __device__ #endif int *copyOf(int length, int *toCopy); #ifdef __CUDACC__ __host__ __device__ #endif inline int *copyOf(int length, int *toCopy, int *ret); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ #ifdef __CUDACC__ __host__ __device__ #endif inline void copyTo(int length, int *from, int *to); /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif inline void copyTo(int length, int *from, int *to, int *indexes); /** * Permute the given strides * in the given rearrange order * @param toPermute the buffer to permute * @param shapeRank the length of the buffer to permute * @param rearrange the rearrange order (must be 0 based indexes * and all must be filled in) * @return the rearranged array */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *permutedStrides(int *toPermute, int shapeRank, int *rearrange); /** * Return the slice (shape + 1 in pointer arithmetic) * @param shape the shape to take the slice of * @return the shape array - the first entry */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *slice(int *shape); /** * Returns the length of the * shape information buffer: * rank * 2 + 3 * @param rank the rank to get the shape * info length for * @return rank * 2 + 4 */ #ifdef __CUDACC__ __host__ __device__ #endif inline int shapeInfoLength(int rank); /** * Returns the rank portion of * an information buffer */ #ifdef __CUDACC__ __host__ __device__ #endif int rank( int *buffer); /** * Converts a raw int buffer of the layout: * rank * shape * stride * offset * elementWiseStride * * where shape and stride are both straight int pointers */ #ifdef __CUDACC__ __host__ __device__ #endif ShapeInformation *infoFromBuffer(int *buffer); /** * Returns the stride portion of an information * buffer */ #ifdef __CUDACC__ __host__ __device__ #endif int *stride(int *buffer); /** * Compute the length of the given shape */ #ifdef __CUDACC__ __host__ __device__ #endif int length(int *shapeInfo); /*** * Returns the offset portion of an information buffer */ #ifdef __CUDACC__ __host__ __device__ #endif int offset(int *buffer); /** * Returns the ordering * for this shape information buffer */ #ifdef __CUDACC__ __host__ __device__ #endif inline char order(int *buffer); /** * Returns the element wise stride for this information * buffer */ #ifdef __CUDACC__ __host__ 
__device__ #endif inline int elementWiseStride(int *buffer); /** * Returns the element wise stride for this information * buffer * relative to a dimension and ordering for a reduction index */ #ifdef __CUDACC__ __host__ __device__ #endif inline int reductionIndexElementWiseStride(int *buffer, int *dimension, int dimensionLength); /** * Returns whether * the given shape info buffer * represents a scalar shape */ #ifdef __CUDACC__ __host__ __device__ #endif inline int isScalar(int *info); /** * Returns whether * the given shape information * represents a scalar * shape or not */ #ifdef __CUDACC__ __host__ __device__ #endif inline int isScalar(volatile ShapeInformation *info); /** * Return a copy of this array with the * given index omitted * * @param data the data to copy * @param indexes the index of the item to remove * @param dataLength the length of the data array * @param indexesLength the length of the data array * @return the new array with the omitted * * item */ #ifdef __CUDACC__ __host__ __device__ #endif inline void removeIndex(int *data, int *indexes, int dataLength, int indexesLength, int *out); /** * Iterate over a given set of indexes * the begin and end indexes are 0 based. * 1 padding is automatically assumed for the ending. * * For example if you want to iterate over 0 to 4 * it will go to 4 rather than 3. * * indexes should be the indexes to exclude * indexes length should be the length of indexes */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* everyIndexBut(int *indexes,int indexesLength,int begin,int end); /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. */ #ifdef __CUDACC__ __device__ #endif inline int tadOffset(shape::ShapeInformation *xInfo, int offset); /** * Returns a shape * forces the given length to be 2. 
* @param shape the shape to modify * @param dimension the dimension (row or column) * for the shape to be returned as * @return the new shape */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* ensureVectorShape(int *shape); #ifdef __CUDACC__ __host__ __device__ #endif inline int* createScalarShapeInfo(); #ifdef __CUDACC__ __host__ __device__ #endif inline int* createScalarShapeInfo(int *ret); /** * Generate an int buffer * up to the given length * at the specified increment * */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *range(int from, int to, int increment); /** * Range between from and two with an * increment of 1 */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *range(int from, int to); /** * Keep the given indexes * in the data */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *keep(volatile int *data, int *index, int indexLength, int dataLength); /** * Generate reverse copy of the data * @param data * @param length * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *reverseCopy(int *data, int length); #ifdef __CUDACC__ __host__ __device__ #endif inline void reverseCopyTo(int *from, int *to, int length); #ifdef __CUDACC__ __host__ __device__ #endif inline void reverseCopyTo(int *from, int *to, int *indexes,int length); /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *concat(int *arr1, int arr1Length, int *arr2, int arr2Length); /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ #ifdef __CUDACC__ __host__ __device__ #endif int *concat(int numArrays, int numTotalElements, int **arr, int *lengths); /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of 
the dimension array * @return the length per slice of the given shape * along the given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int lengthPerSlice(int rank, int *shape, int *dimension, int dimensionLength); /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int sliceOffsetForTensor(int rank, int index, int *shape, int *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength); /** * Computes the tensor along dimension * offset * @param index the index to get the offset for the tad for * @param rank the rank of the shapes and strides * @param info the shape information to use for tad * @param dimension the dimensions to use for computing the tensor along dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int offset(int index, int rank, shape::ShapeInformation *info, int *dimension, int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tensorsAlongDimension(int rank, volatile int length, volatile int *shape, int *dimension, int dimensionLength); /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength); /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tadForBlockIndex(int blockSize, int blockIdx, int i); /** * Computes the number of tads per block * */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tadsPerBlock(int blockSize, int tads); #ifdef __CUDACC__ __host__ __device__ #endif inline int *tadShapeInfo(int index, int *xShapeInfo, int *dimension, int dimensionLength); /** * Returns a shape buffer * for the shape information metadata. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif inline int *toShapeBuffer( ShapeInformation *info); /** * Returns the number of elements per thread */ #ifdef __CUDACC__ __device__ #endif int numElementsPerThread(int N); /** * Returns the block starting index */ #ifdef __CUDACC__ __device__ #endif int blockStartingIndex(int N); /** * Returns the thread starting index */ #ifdef __CUDACC__ __device__ #endif int threadStartingIndex(int N, int stride, int offset); /** * Returns the thread ending index */ #ifdef __CUDACC__ __device__ #endif int threadEndingIndex(int N, int stride, int offset); /** * Returns indexing information * for the current kernel invocation */ #ifdef __CUDACC__ __device__ #endif CurrentIndexing *currentIndex(int N, int offset, int stride); /** Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * @param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ #ifdef __CUDACC__ __host__ __device__ #endif int tadIndex(int i, int elementWiseStride, int numElementsPerTad); /** * Map a tad to a * reduction index. * @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ #ifdef __CUDACC__ __host__ __device__ #endif int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal); /** * Computes the number of tads * per reduce index for the * reduction tad. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal); /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ #ifdef __CUDACC__ __host__ __device__ #endif int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum); /** * Returns the prod of the data * up to the given length */ #ifdef __CUDACC__ __host__ __device__ #endif int prod(int *data, int length); #ifdef __CUDACC__ __host__ __device__ #endif inline int prodLong( int *data, int length); /** * Returns the rear most left over item not present in * the dimension array. This assumes that the dimension array is sorted. * * For example, given a dimension array of: * 0,2 * * and * * 12,4,2,1 in data * * You end up with 1 (data[3]) * since the first item won't match * the last item of the dimension array */ #ifdef __CUDACC__ __host__ __device__ #endif int rearMostLeftOverItem(int *data,int length,int *dimension,int dimensionLength); /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ #ifdef __CUDACC__ __host__ __device__ #endif inline int getOffset(int baseOffset, int *shape, int *stride, int *indices,int rank); #ifdef __CUDACC__ __host__ __device__ #endif int* createShapeInfo(int *shape, int *stride, int rank); #ifdef __CUDACC__ __host__ __device__ #endif int* createShapeInfo(int *shape, int *stride, int rank, int *buffer); /** * Convert a linear index to * the equivalent nd index * @param shape the 
shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* ind2sub(int rank, int *shape,int index,int numIndices); #ifdef __CUDACC__ __host__ __device__ #endif inline int *ind2sub(int rank, int *shape,int index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif void ind2sub(int rank,int *shape,int index,int numIndices,int *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif void ind2sub(int rank, int *shape, int index, int *out); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* ind2subC(int rank, int *shape, int index); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* ind2subC(int rank, int *shape, int index, int numIndices); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param 
numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline void ind2subC(int rank, int *shape, int index, int numIndices, int *out); /** * Convert a linear index to * the equivalent nd index. * Infers the number of indices from the specified shape. * * @param shape the shape of the dimensions * @param index the index to map * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline void ind2subC(int rank, int *shape, int index, int *out); /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ #ifdef __CUDACC__ __host__ __device__ #endif int sub2Ind(int rank, int *shape, int *indices); /** * Compute the real linear indices for the given shape and stride */ #ifdef __CUDACC__ __host__ __device__ #endif Nd4jIndex *computeIndices(int rank, int *shape, int *stride); /** * Compute the real linear indices for the * given shape buffer. 
Shape,stride and rank are derived * from the buffer */ #ifdef __CUDACC__ __host__ __device__ #endif Nd4jIndex *computeIndices( int *shapeBuffer); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif void ind2subOrder(int *shapeInfo,int index,int numIndices,int *out); /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes along each dimension */ #ifdef __CUDACC__ __host__ __device__ #endif void ind2subOrder(int *shapeInfo,int index,int *out); #ifdef __CUDACC__ __host__ __device__ #endif void printShapeInfo(int *shapeInfo); #ifdef __CUDACC__ __host__ __device__ #endif void printShapeInfoLinear(int *shapeInfo); #ifdef __CUDACC__ __host__ __device__ #endif inline void printIntArray(int *arr,int length); #ifdef __CUDACC__ __host__ __device__ #endif void printArray(float *arr,int length); /** * Dimension collapse is an algorithm * for collapsing singular dimensions. * This algorithm will adjust the dimensions * wrt the original. * * The algorithm has 3 components: * trailing ones * middle ones * beginning ones * * dimensions that are specified to reduce along * that are singular should be truncated * * dimensions that are specified that are singular * at the beginning should be removed with middle dimensions * decremented. * * For any time there is a no op, a collapse will * set the first dimension to be -1. 
* * */
class TAD {
public:
    int dimensionLength;
    int *dimension = nullptr;
    int *shapeInfo = nullptr;
    int *tadOnlyShapeInfo = nullptr;
    int numTads = 0;
    int *tadShape = nullptr;
    int *tadStride = nullptr;
    int *tadOffsets = nullptr;
    int tadOffsetForBlock = 0;
    int rank = 0;
    int numOnes = 0;
    //pointers to original (as passed to init); `dimension`/`shapeInfo` above may be
    //re-pointed or re-allocated by collapse(), these keep the caller's originals
    int originalDimensionLength;
    int *originalDimension = nullptr;
    int *originalShapeInfo = nullptr;
    bool squeezed = false;
    bool newSqueezeDimensions = false;
    int numOnesInMiddle = 0;
    bool wholeThing = false;
    //need to track whether we create a new dimension array or not, we could have just moved the pointer forward
    //due to leading ones
    bool createdNewDimension = false;

    // special case for CUDA, we're passing in __shared__ memory pointers to be used instead of new/malloc
    void *ptrManager = nullptr;
    int *ptrOutput = nullptr;

#ifdef __CUDACC__
    __host__ __device__
#endif
    TAD() {}

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Convenience constructor: forwards straight to init().
    TAD(int *shapeInfo,int *dimension,int dimensionLength) {
        this->init(shapeInfo, dimension, dimensionLength);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Install an external memory manager (CUDA shared-memory buffers) used instead of new/delete.
    inline void setExternalBuffers(void *ptrManager) {
        this->ptrManager = ptrManager;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void setOutputBuffer(int *ptrOutput) {
        this->ptrOutput = ptrOutput;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    /**
     * This method is for GPU mostly, it allows to initialize TAD instance with precalculated tadOnlyShapeInfo
     */
    inline void initWithExternalTAD(int *existingTAD, int *originalShape, int *dimension, int dimensionLength) {
        this->tadOnlyShapeInfo = existingTAD;
        this->rank = shape::rank(originalShape);

        this->originalShapeInfo = originalShape;
        this->originalDimension = dimension;
        this->originalDimensionLength = dimensionLength;

        this->shapeInfo = originalShape;
        this->dimension = dimension;
        this->dimensionLength = dimensionLength;

        this->tadShape = shape::shapeOf(existingTAD);
        this->tadStride = shape::stride(existingTAD);

        this->numTads = shape::length(originalShape) / shape::length(existingTAD); // this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);//shape::length(originalShape) / shape::length(existingTAD);
        this->wholeThing = this->numTads == 1 || this->dimensionLength == this->rank || this->numTads == shape::length(this->shapeInfo);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Full initialization: computes numTads, strips singular (size-1) dimensions via
    // collapse(), and decides whether the TAD degenerates to the whole array (wholeThing).
    inline void init(int *shapeInfo,int *dimension,int dimensionLength) {
        this->originalShapeInfo = shapeInfo;
        this->originalDimension = dimension;
        this->originalDimensionLength = dimensionLength;
        //start off as original references
        this->shapeInfo = shapeInfo;
        this->dimensionLength = dimensionLength;
        this->dimension = dimension;
        this->rank = shape::rank(shapeInfo);
        this->numTads = this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);

        //ensure we get rid of trailing ones in the dimensions
        //we can do this with a simple decrement of the dimension length for trailing ones
        //ensure we only do this for non column vectors
        if(shape::rank(shapeInfo) > 2)
            for (int i = shape::rank(shapeInfo) - 1; i >= 0; i--) {
                if (shape::shapeOf(shapeInfo)[i] == 1) {
                    this->numOnes++;
                    if (i > 0 && i < shape::rank(shapeInfo) - 1)
                        this->numOnesInMiddle++;
                }
            }

        //note here that we need to keep the original rank shape info for properly permuting strides and shapes
        //this->rank -= trailingDimensionDecrement;
        //this->dimensionLength -= trailingDimensionDecrement;
        //move dimension ones where dimensions + 1 s overlap
        if (numOnes > 0) {
            this->collapse();
        }

        if(!shape::isVector(shapeInfo))
            wholeThing = this->numTads == 1 || this->dimensionLength == this->rank || this->numTads == shape::length(shapeInfo);
        else if(shape::isScalar(shapeInfo))
            wholeThing = true;
        //vector case
        else {
            // NOTE(review): `dimension == 0` tests the POINTER against null and then
            // immediately indexes it (`dimension[0]`), which would be UB if the pointer
            // really were null; this looks like it was meant to be `dimension[0] == 0`
            // (first dim specified is 0) — confirm intended semantics.
            if(dimension == 0 && shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
                wholeThing = true;
            }
        }
    }

    // Debug helper: prints every TAD of x (or the whole buffer when wholeThing)
    // using the raw-array iteration macros.
    template <typename T>
#ifdef __CUDACC__
    __host__ __device__
#endif
    void printTADsND(T *x) {
        if(wholeThing) {
            for(int i = 0; i < shape::length(tadOnlyShapeInfo); i++) {
                printf(" %f ",x[i]);
            }
            printf("\n");
        }
        else {
            for (int i = 0; i <  numTads; i++) {
                int offset = tadOffsets[i];
                // printf("Offsets for %d is %d\n",i,offset);
                int shapeIter[MAX_RANK];
                int coord[MAX_RANK];
                int dim;
                int rankIter = shape::rank(tadOnlyShapeInfo);
                int xStridesIter[MAX_RANK];
                T *xPointer = x + offset;
                if (PrepareOneRawArrayIter<T>(rankIter, shape::shapeOf(tadOnlyShapeInfo), xPointer, shape::stride(tadOnlyShapeInfo), &rankIter, shapeIter, &xPointer, xStridesIter) >= 0) {
                    ND4J_RAW_ITER_START(dim, shape::rank(tadOnlyShapeInfo), coord, shapeIter); {
                        /* Process the innermost dimension */
                        printf(" %f ",xPointer[0]);
                    }
                    ND4J_RAW_ITER_ONE_NEXT(dim, rankIter, coord, shapeIter, xPointer, xStridesIter);
                    printf("\n");
                }
                else {
                    printf("Unable to prepare array\n");
                }
            }
        }
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Permutes a copy of shapeBuffer into caller-supplied `out` (shapeBuffer itself untouched).
    inline void permuteShapeBufferInPlace(int *shapeBuffer,int *rearrange,int *out) {
        memcpy(out,shapeBuffer,sizeof(int) * shape::shapeInfoLength(this->rank));
        doPermuteShapeBuffer(this->rank,out,rearrange);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): this MUTATES `shapeBuffer` in place and returns a freshly-allocated
    // copy of the PRE-permutation buffer (caller owns it) — surprising contract, confirm
    // callers expect the un-permuted copy back.
    inline int *permuteShapeBuffer(int *shapeBuffer,int *rearrange) {
        int len = shape::shapeInfoLength(this->rank);
        int *copy = shape::copyOf(len,shapeBuffer);
        doPermuteShapeBuffer(rank,shapeBuffer,rearrange);
        return copy;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Lazily builds the shape-info describing a single TAD and caches shape/stride views into it.
    void createTadOnlyShapeInfo() {
        this->tadOnlyShapeInfo = this->shapeInfoOnlyShapeAndStride();
        this->tadShape = shape::shapeOf(this->tadOnlyShapeInfo);
        this->tadStride = shape::stride(this->tadOnlyShapeInfo);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Maps a linear TAD index to full-rank coordinates: dimensions in `originalDimension`
    // stay 0, the left-over dimensions take the ind2subC decomposition of `index`.
    // Returns a new int[rank] owned by the caller (unless ptrManager supplied the buffers).
    inline int * tad2Sub(int index) {
        int *shape = shape::shapeOf(shapeInfo);
        int rank = shape::rank(shapeInfo);
        int leftOverIndexLen = rank - originalDimensionLength;
#ifdef __CUDACC__
        int *ret;
        int *tadShape;
        int *leftOverIndexes;
        int *sub;
        if (ptrManager != nullptr) {
            UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
            ret = manager->getTempRankBuffer1();
            tadShape = manager->getTempRankBuffer2();
            leftOverIndexes = manager->getTempRankBuffer3();
            sub = manager->getTempRankBuffer4();
        } else {
            ret = new int[rank];
            tadShape = new int[leftOverIndexLen];
            leftOverIndexes = new int[leftOverIndexLen];
            sub = new int[rank];
        }
#else
        int *ret = new int[rank];
        //shape of the tad
        int *tadShape = new int[leftOverIndexLen];
        int *leftOverIndexes = new int[leftOverIndexLen];
        int *sub = new int[rank];
#endif
        //indexes not specified in the tad indexes
        //every coordinate starts as zero
        memset(ret,0,sizeof(int) * rank);
        //find the length of the elements we
        //are iterating over
        int len = 1;
        //left over index cursor for initializing elements
        int leftOverIndex = 0;
        for(int i = 0; i < rank; i++) {
            //look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
            bool found = false;
            for(int j = 0; j < originalDimensionLength; j++) {
                //skip over specified dimensions when computing left over length
                if(i == originalDimension[j])  {
                    found = true;
                    break;
                }
            }
            //add to the indexes that aren't specified as part of the tad dimension
            //indexes
            if(!found) {
                //accumulate the list of indexes left over used for initializing the return value
                leftOverIndexes[leftOverIndex] = i;
                //accumulate the tad shape
                tadShape[leftOverIndex] = shape[i];
                //accumulate the length (product) of the indexes that will be iterated over
                len *= shape[i];
                leftOverIndex++;
            }
        }
        //sub for indices
        /* int *sub = new int[leftOverIndexLen];
           shape::ind2subOrder(tadShape,index,len,sub); */
        shape::ind2subC(leftOverIndexLen,tadShape,index,len, sub);
        for(int i = 0; i < leftOverIndexLen; i++) {
            ret[leftOverIndexes[i]] = sub[i];
        }
        if (ptrManager == nullptr) {
            delete[] tadShape;
            delete[] leftOverIndexes;
            delete[] sub;
        }
        return ret;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Frees only the buffers this instance owns; comparisons against the
    // `original*` pointers distinguish caller-owned from self-allocated memory.
    ~TAD() {
        //we may have just moved the pointer forward, we may not need to delete the pointer here
        // NOTE(review): collapse() assigns `this->dimension = new int[...]` but never sets
        // createdNewDimension, and may later bump `dimension` past the allocation start —
        // so this delete[] can both leak (flag never set) and, if the flag were set after a
        // bump, free a shifted pointer (UB). Confirm ownership protocol.
        if(originalDimension != this->dimension && createdNewDimension) {
            delete[] this->dimension;
        }
        if(this->originalShapeInfo != this->shapeInfo) {
            delete[] this->shapeInfo;
        }
        if(this->tadOffsets != nullptr) {
            delete[] this->tadOffsets;
        }
        if(this->tadOnlyShapeInfo != nullptr && this->tadOnlyShapeInfo != shapeInfo) {
            delete[] this->tadOnlyShapeInfo;
        }
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Builds a permutation that moves the non-TAD dimensions to the front and the
    // TAD dimensions (reversed) to the back. Returns new int[rank]; caller frees.
    inline int* permuteDims() {
        //permute dimensions for tad
        int dimIdx = 0;
        //loop backwards assuming dimension is sorted
        int *permuteDims = new int[shape::rank(shapeInfo)];
        for(int i = 0; i < shape::rank(shapeInfo); i++) {
            bool found = false;
            for(int j = 0; j < originalDimensionLength; j++) {
                if(i == originalDimension[j]) {
                    found = true;
                    break;
                }
            }
            //not found, append it to the end for permute
            if(!found)
                permuteDims[dimIdx++] = i;
        }
        for(int i = originalDimensionLength - 1; i >= 0; i--) {
            permuteDims[dimIdx++] = originalDimension[i];
        }
        //permute dimensions for tad
        return permuteDims;
    }

    /**
     * Compute the tad offset given a dimension.
     *
     * The general pattern for computing a tad offset is as follows:
     * Every $STRIDE that was removed (the first dimension)
     * do a jump by the major stride of the parent array
     * (stride[0] of the parent array)
     *
     * For example given a c ordered 2,2,3,2 with stride 12,6,2,1
     * A tad of dimension 1 will jump 12 every 6 tads.
     *
     * You then end up with offsets of:
     * 0
     * 1
     * 2
     * 3
     * 4
     * 5
     * 12
     * 13
     * 14
     * 15
     * 16
     * 17
     *
     * notice there are 12 tads here. This same incremental jump will happen
     * every time.
     * Note here that by default the
     * stride of element wise stride is used for the hops.
     *
     * Sometimes a jump doesn't happen. If there are less tads
     * than the stride of the dimension you removed, the
     * element wise stride will always be used.
     *
     * For example in a dimension of 0,1, you end up with offsets of:
     * 0,1,2,3,4,5
     *
     * Given that the inner most stride of the dimensions that was removed (1)
     * had a stride of 6, we never need to do a major stride jump.
     */
#ifdef __CUDACC__
    __host__ __device__
#endif
    // NOTE(review): the `dimensionLength > 1` and else branches are identical except the
    // first adds a `ret < 0 -> return -1` guard; consider whether the guard should apply
    // in both branches (or the split removed).
    inline int tadOffset(int index) {
        if(tadOnlyShapeInfo == nullptr) {
            this->createTadOnlyShapeInfo();
        }
        if(wholeThing)
            return index;
        if(dimensionLength > 1) {
            int *tad2Sub = this->tad2Sub(index,ptrManager);
            int ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));
            if(ret < 0) {
                if (ptrManager == nullptr)
                    delete[] tad2Sub;
                return -1;
            }
            if (ptrManager == nullptr)
                delete[] tad2Sub;
            return ret;
        }
        else {
            int *tad2Sub = this->tad2Sub(index,ptrManager);
            int ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));
            if (ptrManager == nullptr)
                delete[] tad2Sub;
            return ret;
        }
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Same mapping as tad2Sub(int) but takes the memory manager explicitly so the
    // CUDA path can use shared-memory scratch buffers instead of new[].
    inline int * tad2Sub(int index, void *ptrManager) {
        int *shape = shape::shapeOf(shapeInfo);
        int rank = shape::rank(shapeInfo);
        int leftOverIndexLen = rank - originalDimensionLength;
        int *tadShape;
        int *leftOverIndexes;
        int *sub;
        int *ret;
#ifdef __CUDACC__
        if (ptrManager != nullptr) {
            UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
            ret = manager->getTempRankBuffer1();
            tadShape = manager->getTempRankBuffer2();
            leftOverIndexes = manager->getTempRankBuffer3();
            sub = manager->getTempRankBuffer4();
        } else {
            ret = new int[rank];
            //shape of the tad
            leftOverIndexes = new int[leftOverIndexLen];
            sub = new int[rank];
            tadShape = new int[leftOverIndexLen];
        }
#else
        ret = new int[rank];
        //shape of the tad
        leftOverIndexes = new int[leftOverIndexLen];
        sub = new int[rank];
        tadShape = new int[leftOverIndexLen];
#endif
        //indexes not specified in the tad indexes
        //every coordinate starts as zero
        memset(ret,0,sizeof(int) * rank);
        //find the length of the elements we
        //are iterating over
        int len = 1;
        //left over index cursor for initializing elements
        int leftOverIndex = 0;
        for(int i = 0; i < rank; i++) {
            //look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
            bool found = false;
            for(int j = 0; j < originalDimensionLength; j++) {
                //skip over specified dimensions when computing left over length
                if(i == originalDimension[j]) {
                    found = true;
                    break;
                }
            }
            //add to the indexes that aren't specified as part of the tad dimension
            //indexes
            if(!found) {
                //accumulate the list of indexes left over used for initializing the return value
                leftOverIndexes[leftOverIndex] = i;
                //accumulate the tad shape
                tadShape[leftOverIndex] = shape[i];
                //accumulate the length (product) of the indexes that will be iterated over
                leftOverIndex++;
                len *= shape[i];
            }
        }
        //sub for indices
        /* int *sub = new int[leftOverIndexLen];
           shape::ind2subOrder(tadShape,index,len,sub); */
        shape::ind2subC(leftOverIndexLen,tadShape,index,len, sub);
        for(int i = 0; i < leftOverIndexLen; i++) {
            ret[leftOverIndexes[i]] = sub[i];
        }
        if (ptrManager == nullptr) {
            delete[] leftOverIndexes;
            delete[] tadShape;
            delete[] sub;
        }
        return ret;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Materializes tadOffsets[numTads] by evaluating tadOffset for every index.
    void createOffsets() {
        traceNew(1);
        this->tadOffsets = new int[this->numTads];
        for(int i = 0; i < this->numTads; i++) {
            this->tadOffsets[i] = this->tadOffset(i);
        }
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Builds the shape-info buffer (rank, shape, stride, ews, order) describing one TAD.
    // Returns a heap-allocated buffer owned by this instance (freed in ~TAD via tadOnlyShapeInfo).
    inline int *shapeInfoOnlyShapeAndStride() {
        if(wholeThing) {
            return shape::createScalarShapeInfo();
        }
        //ensure tad shapes get setup right for vectors
        if(dimensionLength < 1 && !shape::isVector(shapeInfo))
            return shapeInfo;
        int *theShape = shape::shapeOf(shapeInfo);
        int *theStride = shape::stride(shapeInfo);
        int rank = this->originalDimensionLength <= 1 ? 2 : originalDimensionLength;
#ifdef __CUDACC__
        int *ret;
        // NOTE(review): `ret` obtained from the shared reduction buffer is unconditionally
        // overwritten by the `new int[...]` on the next statement (no `else`), so the
        // manager path is dead and the device heap is always used — likely a missing else.
        if (ptrManager != nullptr) {
            ret = (int *) ((UnifiedSharedMemory *) ptrManager)->getSharedReductionBuffer();
        }
        ret = new int[shape::shapeInfoLength(rank)];
#else
        int *ret = new int[shape::shapeInfoLength(rank)];
#endif
        //set the rank
        ret[0] = rank;
        int *retShape = shape::shapeOf(ret);
        int *retStride = shape::stride(ret);
        //only possible solution is scalar
        if(shape::isVector(shapeInfo)) {
            for(int i = 0; i < 2; i++) {
                retShape[i] = 1;
                retStride[i] = 0;
            }
        }
        else {
            int *permuteIndexes = this->permuteDims();
            int *toPermute = new int[MAX_RANK];
            this->permuteShapeBufferInPlace(shapeInfo,permuteIndexes,toPermute);
            if(originalDimensionLength == 1) {
                if((numOnes < 1 && !shape::isMatrix(shapeInfo)) || this->rank == 2) {
                    if(originalDimension[0] == 0) {
                        int newStride[2] = {1,theStride[originalDimension[0]]};
                        int newShape[2] = {1,theShape[originalDimension[0]]};
                        retShape[0] = newShape[0];
                        retShape[1] = newShape[1];
                        retStride[0] = newStride[0];
                        retStride[1] = newStride[1];
                    }
                    else {
                        int newStride[2] = {theStride[originalDimension[0]],1};
                        int newShape[2] = {theShape[originalDimension[0]],1};
                        retShape[0] = newShape[0];
                        retShape[1] = newShape[1];
                        retStride[0] = newStride[0];
                        retStride[1] = newStride[1];
                    }
                }
                // NOTE(review): this branch is nested inside `originalDimensionLength == 1`,
                // so `originalDimensionLength > 1` can never hold here — dead code; confirm
                // whether the condition belongs one nesting level up.
                else if(originalDimensionLength > 1) {
                    int shapeOffset = shape::rank(shapeInfo) - originalDimensionLength;
                    int *permutedShape = shape::shapeOf(toPermute) + shapeOffset;
                    int *permutedStride = shape::stride(toPermute) + shapeOffset;
                    //now that the dimensions are permuted, all of the tad shapes/strides are in the back
                    //all we need to do is copy from the start of the tad dimensions to the end since they are
                    //arranged in the right order
                    shape::copyTo(originalDimensionLength, permutedStride, retStride);
                    shape::copyTo(originalDimensionLength, permutedShape, retShape);
                }
                else {
                    if(shape::rank(toPermute) > 2) {
                        //get the last 2 dimensions
                        int *lastDimensionShape = shape::shapeOf(toPermute) + shape::rank(toPermute) - originalDimensionLength;
                        int *lastDimensionStride = shape::stride(toPermute) + shape::rank(toPermute) - originalDimensionLength;
                        //the last dimension specified and a matrix are an equivalent edge case
                        if(theShape[originalDimension[0]] == 1 || shape::shapeOf(toPermute)[shape::rank(toPermute) - 1] == 1) {
                            int newStride[2] = {theStride[originalDimension[0]],1};
                            int newShape[2] = {theShape[originalDimension[0]],1};
                            retShape[0] = newShape[0];
                            retShape[1] = newShape[1];
                            //when the actual shape matches the last 2 dimensions of the permuted array, use those strides instead
                            if(lastDimensionShape[0] == retShape[0] && lastDimensionShape[1] == retShape[1]) {
                                retStride[0] = lastDimensionStride[0];
                                retStride[1] = lastDimensionStride[1];
                            }
                            else {
                                retStride[0] = newStride[0];
                                retStride[1] = newStride[1];
                            }
                        }
                        else {
                            int newStride[2] = {1,theStride[originalDimension[0]]};
                            int newShape[2] = {1,theShape[originalDimension[0]]};
                            retShape[0] = newShape[0];
                            retShape[1] = newShape[1];
                            //when the actual shape matches the last 2 dimensions of the permuted array, use those strides instead
                            if(lastDimensionShape[0] == retShape[0] && lastDimensionShape[1] == retShape[1]) {
                                retStride[0] = lastDimensionStride[0];
                                retStride[1] = lastDimensionStride[1];
                            }
                            else {
                                retStride[0] = newStride[0];
                                retStride[1] = newStride[1];
                            }
                        }
                    }
                }
            }
            else {
                //copy starting from the tad shapes/strides that got permuted to the back
                int shapeOffset = shape::rank(shapeInfo) - originalDimensionLength;
                int *permutedShape = shape::shapeOf(toPermute) + shapeOffset;
                int *permutedStride = shape::stride(toPermute) + shapeOffset;
                //now that the dimensions are permuted, all of the tad shapes/strides are in the back
                //all we need to do is copy from the start of the tad dimensions to the end since they are
                //arranged in the right order
                shape::copyTo(originalDimensionLength, permutedStride, retStride);
                shape::copyTo(originalDimensionLength, permutedShape, retShape);
            }
            delete[] permuteIndexes;
            delete[] toPermute;
        }
        ret[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape::shapeOf(ret),shape::stride(ret),1);
        if(wholeThing)
            ret[shape::shapeInfoLength(rank) - 2] = 1;
        else
            ret[shape::shapeInfoLength(rank) - 2] = reductionIndexElementWiseStride(this->shapeInfo,dimension,dimensionLength);
        // we set offset to 0 here, just to avoid weird numbers. however, we should not use it anywhere
        ret[shape::shapeInfoLength(rank) - 3] = 0;
        return ret;
    }

    /**
     * Length of a tad given
     * the shape information
     */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int tadLength(int *shapeInfo, int *dimension, int dimensionLength) {
        if(dimensionLength == 1) {
            return shape::shapeOf(shapeInfo)[dimension[0]];
        }
        else {
            int ret = 1;
            for(int i = 0; i < shape::rank(shapeInfo); i++) {
                for(int j = 0; j < dimensionLength; j++) {
                    if(i == dimension[j])
                        ret *= shape::shapeOf(shapeInfo)[dimension[j]];
                }
            }
            return ret;
        }
    }

    /**
     * Computes the number
     * of tensors along
     * a given dimension
     */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength) {
        return shape::length(shapeInfo) / this->tadLength(shapeInfo,dimension,dimensionLength);
    }

#ifdef __CUDACC__
    __host__ __device__
    // CUDA-only: caches the offset of the TAD assigned to this block.
    inline void createOffsetForBlock(int blockIdx) {
        this->tadOffsetForBlock = this->tadOffset(blockIdx);
    }
#endif

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Removes singular (size-1) dimensions from `dimension`, adjusting dimensionLength
    // and remapping the surviving dimension indices. See class-level comment for the
    // trailing/middle/leading-ones algorithm.
    inline void collapse() {
        int *shape = shape::shapeOf(shapeInfo);
        //handle negative dimensions/backwards indexing
        for(int i = 0; i < dimensionLength; i++) {
            if((dimension)[i] < 0)
                (dimension)[i] += shape::rank(this->shapeInfo);
        }
        // NOTE(review): allocates a fresh copy but never sets createdNewDimension, so
        // ~TAD() will not free it (leak); and the `dimension += leadingOneDimensions`
        // below shifts the pointer off the allocation start — confirm ownership.
        this->dimension = new int[dimensionLength];
        memcpy(this->dimension,this->originalDimension,sizeof(int) * dimensionLength);
        //we can drop trailing dimensions where it's all singular for example:
        // shape: 4,3,1,2
        //dimension: 0,2
        // the problem for 0,2 is equivalent to: 0
        //the rest of the algorithm handles cases suchas
        //shape: 4,1,1,2
        //dimension: 0,1
        //when this happens there are other dimensions (eg: at the end) that matter
        int trailingOneDimensions = 0;
        //trailing ones
        for(int i = dimensionLength - 1; i >= 0; i--) {
            if(shape[dimension[i]] != 1) {
                break;
            }
            else if(shape[dimension[i]] == 1)
                trailingOneDimensions++;
        }
        dimensionLength -= trailingOneDimensions;
        int leadingOneDimensions = 0;
        //leading ones
        for(int i = 0; i < dimensionLength; i++) {
            if(shape[dimension[i]] != 1) {
                break;
            }
            else if(shape[dimension[i]] == 1)
                leadingOneDimensions++;
        }
        //bump the dimension pointer forward for however many leadingones there are
        dimension += leadingOneDimensions;
        //decrease the dimension length by the amount of leading ones
        dimensionLength -= leadingOneDimensions;
        bool preConverged = true;
        for(int i = 0; i < dimensionLength; i++) {
            if(shape[dimension[i]] == 1) {
                preConverged = false;
                break;
            }
        }
        //we took away all the singular dimensions, we can just return
        if(preConverged)
            return;
        //no more singular dimensions specified
        bool done = false;
        int onesDecrement = 0;
        bool changed = false;
        while(!done) {
            //terminate early: only singular dimensions specified for reduce
            if((dimensionLength) < 1) {
                done = true;
                //signal as a no op
                dimension[0] = -1;
                break;
            }
            //captures intermediary result from the for loop
            traceNew(3);
            int intermediaryResult[MAX_RANK];
            for(int i = 0; i < dimensionLength; i++) {
                intermediaryResult[i] = (dimension)[i];
            }
            bool oneEncountered = false;
            bool nonOneEncountered = false;
            bool hitBeginning = false;
            //assume intermediate collapsing of dimensions
            bool collapseMiddleDimensions = true;
            //note here that dimension length MAY end up being zero
            for(int i = (dimensionLength) - 1; i >= 0; i--) {
                if(shape[(dimension)[i]] == 1) {
                    oneEncountered = true;
                    //trailing ones
                    if(!nonOneEncountered) {
                        //just drop trailing ones
                        dimensionLength--;
                        nonOneEncountered = false;
                        collapseMiddleDimensions = false;
                        //intermediary result just needs to have the results copied from dimension since we're just removing the tail
                        memcpy(intermediaryResult,dimension,sizeof(int) * dimensionLength);
                        changed = true;
                        //break the for loop and force it to go back around starting from the new index
                        break;
                    }
                    else {
                        //already decremented all dimensions
                        //this was a result of hitting beginning ones
                        //we will only need to loop once
                        if(i == 0) {
                            hitBeginning = true;
                        }
                        //will need to shift dimensions that aren't trailing ones
                        //back by onesDecrement
                        //mark the intermediary result as -1 for non inclusion
                        intermediaryResult[i] = -1;
                        onesDecrement++;
                    }
                }
                else {
                    intermediaryResult[i] = (dimension)[i];
                    nonOneEncountered = true;
                }
            }
            if(collapseMiddleDimensions && oneEncountered) {
                //collapse dimensions
                int newIntermediary[MAX_RANK];
                int idx = 0;
                for(int i = 0; i < dimensionLength; i++) {
                    //of note: dimension will decrease by the number of ones encountered
                    if(intermediaryResult[i] >= 0) {
                        //dimension 0 doesn't need to be decremented
                        if(intermediaryResult[i] > 0)
                            newIntermediary[idx++] = intermediaryResult[i] - onesDecrement;
                        else
                            newIntermediary[idx++] = intermediaryResult[i];
                    }
                }
                //decrement by the number of dimensions where ones appeared
                (dimensionLength) -= onesDecrement;
                //update to current result
                memcpy(dimension,newIntermediary,sizeof(int) * (dimensionLength));
                changed = true;
            }
            //converged: no need to change result
            else {
                //update to current result
                memcpy(dimension,intermediaryResult,sizeof(int) * dimensionLength);
            }
            //converge when there are no singular dimensions specified in the reduce
            done = (!oneEncountered && nonOneEncountered) || hitBeginning;
            //delete[] intermediaryResult;
        }
        //nothing changed but need to collapse dimension
        if(!changed && this->numOnes > 0) {
            for(int i = 0; i < dimensionLength ;i++) {
                dimension[i] -= numOnes;
            }
        }
    }
};

#ifdef __CUDACC__
// Device-side scratch allocator: small requests come from device heap, larger ones
// from the preallocated global buffer (or malloc when the buffer would overflow).
template <typename T>
__device__ inline int *cuMalloc(int *buffer, long size, UnifiedSharedMemory *manager) {
    // if we go for 3 dimensions coord space or below - just use shared memory for that
    if (size <= MAX_COORD * 4) {
        int *ptr = new int[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
        return ptr;
    } else {
        // otherwise go to preallocated global memory :(
        int tid = blockIdx.x * blockDim.x + threadIdx.x;
        if (tid * size > PREALLOC_SIZE - size) {
            return (int *) malloc(size);
        } else {
            int *ret = buffer;
            ret += (tid * size);
            return ret;
        }
    }
}
#endif

#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECKS ALLOCATION BOUNDARIES
 */
__device__ inline int *cuMalloc(int *buffer, long size) {
    int *ret = buffer;
    ret += (threadIdx.x * size);
    return ret;
}
#endif

/**
 * Length of a tad given
 * the shape information
 */
#ifdef __CUDACC__
__host__ __device__
#endif
inline int tadLength(int *shapeInfo, int *dimension, int dimensionLength) {
    if(dimensionLength == 1) {
        return shape::shapeOf(shapeInfo)[dimension[0]];
    }
    else {
        int ret = 1;
        for(int i = 0; i < shape::rank(shapeInfo); i++) {
            for(int j = 0; j < dimensionLength; j++) {
                if(i == dimension[j])
                    ret *= shape::shapeOf(shapeInfo)[dimension[j]];
            }
        }
        return ret;
    }
}

/**
 * Tad element wise stride:
 * given the inner most dimension (the sorted dimension of the last)
 * the element wise stride of the tad (disregarding order) is the
 * last dimension's stride.
 *
 * For a given singular dimension this will just be the only entry.
 * For example, given the following c order shape/stride:
 * 2,2,3,2
 * 12,6,2,1
 *
 * The tad element wise stride for 3 will be 1.
 * For zero it wil be 12
 *
 * For 2,3 it's 1
 *
 * Note here that the multi dimensional 2,3 case
 * is equivalent to the singular 3 case.
 *
 *
 * Note that this is for the dimension that ultimately
 * ends up removed.
 *
 * Again: this may not preserve ordering of the tad
 * but maybe used for reductions.
*/ #ifdef __CUDACC__ __host__ __device__ #endif inline int tadElementWiseStride(int *shapeInfo,int *dimension,int dimensionLength) { return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength); } #ifdef __CUDACC__ __host__ __device__ #endif inline bool shapeEquals(int shape1Rank,int *shape1,int shape2Rank,int *shape2) { if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } #ifdef __CUDACC__ __host__ __device__ #endif inline bool shapeEquals(int *shapeInfo1,int *shapeInfo2) { return shape::shapeEquals(shape::rank(shapeInfo1),shape::shapeOf(shapeInfo1),shape::rank(shapeInfo2),shape::shapeOf(shapeInfo2)); } #ifdef __CUDACC__ __host__ __device__ #endif inline bool strideEquals(int shape1Rank,int *shape1,int shape2Rank,int *shape2) { if(shape1Rank != shape2Rank) return false; //rank not equals for(int i = 0; i < shape1Rank; i++) { if(shape1[i] != shape2[i]) return false; } return true; } #ifdef __CUDACC__ __host__ __device__ #endif inline bool strideEquals(int *shapeInfo1,int *shapeInfo2) { return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2)); } #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength,bool reverseCopyStride, int *buffer) { int *theShape = shape::shapeOf(shapeInfo); int *theStride = shape::stride(shapeInfo); int rank = dimensionLength == 1 ? 
2 : dimensionLength; int *ret = buffer; //set the rank ret[0] = rank; int *retShape = shape::shapeOf(ret); int *retStride = shape::stride(ret); int len = rank; if(dimensionLength == 1) { if(shape::isMatrix(theShape,shape::rank(shapeInfo))) { if(dimension[0] == 0) { int newStride[2] = {theStride[dimension[0]],1}; int newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } else { int newStride[2] = {theStride[dimension[0]],1}; int newShape[2] = {theShape[dimension[0]],1}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { int newStride[2] = {1,theStride[dimension[0]]}; int newShape[2] = {1,theShape[dimension[0]]}; retShape[0] = newShape[0]; retShape[1] = newShape[1]; retStride[0] = newStride[0]; retStride[1] = newStride[1]; } } else { int *newIndexes = dimension; if(reverseCopyStride) shape::reverseCopyTo(theStride, retStride, newIndexes, len); else shape::copyTo(len, theStride, retStride, newIndexes); shape::copyTo(len, theShape, retShape, newIndexes); } ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo); return ret; } #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeInfoOnlyShapeAndStride(int *shapeInfo, int *dimension, int dimensionLength,bool reverseCopyStride) { int rank = dimensionLength == 1 ? 
2 : dimensionLength; traceNew(4); int *ret = new int[shape::shapeInfoLength(rank)]; return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret); } #ifdef __CUDACC__ __host__ __device__ #endif inline int * createShapeInfo(int *shape, int *stride, int rank) { traceNew(5); int *ret = new int[shape::shapeInfoLength(rank)]; return createShapeInfo(shape, stride, rank, ret); } #ifdef __CUDACC__ __host__ __device__ #endif inline int * createShapeInfo(int *shape, int *stride, int rank, int *buffer) { buffer[0] = rank; int *retShape = shape::shapeOf(buffer); int *retStride = shape::stride(buffer); for(int i = 0;i < rank; i++) { retShape[i] = shape[i]; retStride[i] = stride[i]; } return buffer; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int * calcStridesFortran(int *shape, int rank, int startNum) { if (isVector(shape, rank)) { traceNew(5); int *ret = new int[2]; for (int i = 0; i < 2; i++) ret[i] = 1; return ret; } int dimensions = rank; traceNew(6); int *stride = new int[dimensions]; int st = startNum; for (int j = 0; j < rank; j++) { stride[j] = st; st *= shape[j]; } return stride; } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int * calcStrides(int *shape, int rank, int startNum) { traceNew(7); int *stride = new int[rank]; if (shape::isVector(shape, rank)) { for (int i = 0; i < 2; i++) stride[i] = 1; return stride; } int st = startNum; for (int j = rank - 1; j >= 0; j--) { stride[j] = st; st *= shape[j]; } return stride; } /** * Computes the standard packed array strides for a given shape. 
* * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int * calcStridesFortran(int *shape, int rank) { return calcStridesFortran(shape, rank, 1); } /** * Computes the standard packed array strides for a given shape. * * @param shape the shape of a matrix: * @param startNum the start number for the strides * @return the strides for a matrix of n dimensions */ #ifdef __CUDACC__ __host__ __device__ #endif inline int* calcStrides(int *shape, int rank) { return calcStrides(shape, rank, 1); } /** * @param toCopy the shape to copy * @return a copy of the original struct */ #ifdef __CUDACC__ __host__ __device__ #endif inline ShapeInformation *shapeCopy( ShapeInformation *toCopy) { ShapeInformation *copy = new ShapeInformation; traceNew(8); copy->shape = new int[toCopy->rank]; memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(int)); traceNew(9); copy->stride = new int[toCopy->rank]; for (int i = 0; i < toCopy->rank; i++) { copy->stride[i] = toCopy->stride[i]; } copy->order = toCopy->order; copy->rank = toCopy->rank; copy->offset = toCopy->offset; copy->elementWiseStride = toCopy->elementWiseStride; return copy; } #ifdef __CUDACC__ __host__ __device__ #endif inline int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder) { if(shape::isVector(shape,rank)) { return stride[rank - 1]; } else { int oldnd; int *olddims = shape::copyOf(rank, shape); int *oldstrides = shape::copyOf(rank, stride); int np, op, last_stride; int oi, oj, ok, ni, nj, nk; traceNew(10); int *newStrides = new int[rank]; oldnd = 0; //set the shape to be 1 x length int newShapeRank = 2; int *newShape = new int[newShapeRank]; newShape[0] = 1; newShape[1] = shape::prodLong(shape, rank); /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. 
*/ for (oi = 0; oi < rank; oi++) { if (shape[oi] != 1) { olddims[oldnd] = shape[oi]; oldstrides[oldnd] = stride[oi]; oldnd++; } } np = 1; for (ni = 0; ni < newShapeRank; ni++) { np *= newShape[ni]; } op = 1; for (oi = 0; oi < oldnd; oi++) { op *= olddims[oi]; } if (np != op) { /* different total sizes; no hope */ return -1; } if (np == 0) { /* the current code does not handle 0-sized arrays, so give up */ return -1; } /* oi to oj and ni to nj give the axis ranges currently worked with */ oi = 0; oj = 1; ni = 0; nj = 1; while (ni < newShapeRank && oi < oldnd) { np = newShape[ni]; op = olddims[oi]; while (np != op) { if (np < op) { /* Misses trailing 1s, these are handled later */ np *= newShape[nj++]; } else { op *= olddims[oj++]; } } /* Check whether the original axes can be combined */ for (ok = oi; ok < oj - 1; ok++) { if (isFOrder) { if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) { /* not contiguous enough */ return -1; } } else { /* C order */ if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) { /* not contiguous enough */ return -1; } } } /* Calculate new strides for all axes currently worked with */ if (isFOrder) { newStrides[ni] = oldstrides[oi]; for (nk = ni + 1; nk < nj; nk++) { newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1]; } } else { /* C order */ newStrides[nj - 1] = oldstrides[oj - 1]; for (nk = nj - 1; nk > ni; nk--) { newStrides[nk - 1] = newStrides[nk] * newShape[nk]; } } ni = nj++; oi = oj++; } /* * Set strides corresponding to trailing 1s of the new shape. 
*/ if (ni >= 1) { last_stride = newStrides[ni - 1]; } else { last_stride = stride[rank - 1]; } if (isFOrder) { if (ni >= 1) last_stride *= newShape[ni - 1]; } for (nk = ni; nk < newShapeRank; nk++) { newStrides[nk] = last_stride; } //returns the last element of the new stride array int ret = last_stride; delete[] newStrides; delete[] newShape; delete[] oldstrides; delete[] olddims; return ret; } } #ifdef __CUDACC__ __host__ __device__ #endif inline int computeElementWiseStride(int rank, int *shape, int *stride, int isFOrder, int *dimension, int dimensionLength) { if(dimensionLength == 1) { return stride[dimension[0]]; } return -1; } /** * Get the shape info buffer * for the given rank and shape. */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeBuffer(int rank, int *shape) { int *stride = shape::calcStrides(shape, rank); traceNew(11); shape::ShapeInformation * shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'c'; shapeInfo->elementWiseStride = elementWiseStride; int *shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete[] stride; delete shapeInfo; return shapeInfoBuffer; } /** * This is special method, it returns ONLY 2D shapebuffer. * * This method is used only for SoftMax */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeBuffer(int rank, int *shape, int *buffer) { buffer[0] = rank; buffer[1] = shape[0]; buffer[2] = 1; buffer[3] = 1; buffer[4] = 1; buffer[5] = 0; buffer[6] = 1; buffer[7] = 99; return buffer; } /** * Get the shape info buffer * for the given rank and shape. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeBufferFortran(int rank, int *shape) { int *stride = shape::calcStridesFortran(shape,rank); traceNew(12); shape::ShapeInformation * shapeInfo = new shape::ShapeInformation(); shapeInfo->shape = shape; shapeInfo->stride = stride; shapeInfo->offset = 0; shapeInfo->rank = rank; int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0); shapeInfo->order = 'f'; shapeInfo->elementWiseStride = elementWiseStride; int *shapeInfoBuffer = shape::toShapeBuffer(shapeInfo); delete shapeInfo; return shapeInfoBuffer; } /** * Compute the real linear indices for the given shape and stride */ #ifdef __CUDACC__ __host__ __device__ #endif inline Nd4jIndex *computeIndices(int rank, int *shape, int *stride) { int length = shape::prodLong(shape,rank); traceNew(13); Nd4jIndex *ret = new Nd4jIndex[length]; for(int i = 0; i < length; i++) { int *idx = shape::ind2sub(rank, shape, i); ret[i] = shape::getOffset(0, shape, stride, idx, rank); delete[] idx; } return ret; } /** * Compute the real linear indices for the given shape and stride */ #ifdef __CUDACC__ __host__ __device__ #endif inline Nd4jIndex *computeIndices(int *shapeBuffer) { return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer)); } /** * Convert the given index (such as 1,1) * to a linear index * @param shape the shape of the indexes to convert * @param indices the index to convert * @return the linear index given the shape * and indices */ #ifdef __CUDACC__ __host__ __device__ #endif inline int sub2Ind(int rank, int *shape, int *indices) { int index = 0; int shift = 1; for(int i = 0; i < rank; i++) { index += shift * indices[i]; shift *= shape[i]; } return index; } /** * Convert a linear index to * the equivalent nd index * @param shape the shape of the dimensions * @param index the index to map * @param numIndices the number of total indices (typically prod of shape( * @return the mapped indexes 
along each dimension */
#ifdef __CUDACC__
    __host__ __device__
#endif
    // Allocating variant: heap-allocates the coordinate array (caller must
    // delete[] it) and delegates to the in-place overload below.
    inline int* ind2sub(int rank, int *shape, int index,int numIndices) {

        traceNew(14);

        int *ret = new int[rank];
        ind2sub(rank, shape, index, numIndices, ret);
        return ret;
    }

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int* ind2sub(int rank, int *shape, int index) {
        return ind2sub(rank,shape, index,shape::prodLong(shape,rank));
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape(
 * @return the mapped indexes along each dimension
 *
 * Column-major (Fortran) decoding: the last axis is peeled off first, i.e.
 * the first dimension varies fastest (inverse of sub2Ind).
 * NOTE(review): unlike ind2subC below, there is no denom > 0 guard here, so a
 * zero-extent shape would divide by zero - confirm callers never pass one.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void ind2sub(int rank, int *shape, int index, int numIndices, int *ret) {
        int denom = numIndices;

        for(int i = rank - 1; i >= 0; i--) {
            denom /= shape[i];
            ret[i] = index / denom;
            index %= denom;
        }
    }

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void ind2sub(int rank,int *shape,int index, int *out) {
        ind2sub(rank,shape, index,shape::prodLong(shape,rank),out);
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape(
 * @return the mapped indexes along each dimension
 *
 * Allocating variant; caller must delete[] the result.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int * ind2subC(int rank, int *shape, int index, int numIndices) {

        traceNew(15);

        int *ret = new int[rank];
        ind2subC(rank, shape, index, numIndices, ret);
        return ret;
    }

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *ind2subC(int rank, int *shape, int index) {
        return ind2subC(rank,shape, index, shape::prodLong(shape,rank));
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape(
 * @return the mapped indexes along each dimension
 *
 * Row-major (C) decoding: the first axis is peeled off first, i.e. the last
 * dimension varies fastest. Zero-extent axes are guarded (denom > 0).
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void ind2subC(int rank, int *shape, int index, int numIndices, int *ret) {
        int denom = numIndices;

        for(int i = 0; i < rank; i++) {
            denom /= shape[i];
            if(denom > 0) {
                ret[i] = index / denom;
                index %= denom;
            }
            else
                ret[i] = 0;

        }
    }

/**
 * Convert a linear index to
 * the equivalent nd index.
 * Infers the number of indices from the specified shape.
 *
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void ind2subC(int rank, int *shape, int index, int *out) {
        ind2subC(rank,shape, index,shape::prodLong(shape,rank),out);
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape(
 * @return the mapped indexes along each dimension
 *
 * Dispatches to ind2sub or ind2subC based on the buffer's order flag.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void  ind2subOrder(int *shapeInfo, int index, int numIndices,int *out) {
        if(shape::order(shapeInfo) == 'f') {
            shape::ind2sub(
                    shape::rank(shapeInfo),
                    shape::shapeOf(shapeInfo),
                    index,
                    numIndices,
                    out);
        }
        else {
            shape::ind2subC(
                    shape::rank(shapeInfo),
                    shape::shapeOf(shapeInfo),
                    index,
                    numIndices,
                    out);

        }
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape(
 * @return the mapped indexes along each dimension
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void ind2subOrder(int *shapeInfo, int index, int *out) {
        ind2subOrder(shapeInfo,index,shape::length(shapeInfo),out);
    }

/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically prod of shape(
 * @return the mapped indexes along each dimension
 */

/**
 * Out-of-place permutation: ret[i] = shape[rearrange[i]].
 * Result is heap-allocated; caller must delete[] it.
 *
 * @param length
 * @param shape
 * @param rearrange
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *doPermuteSwap(int length, int *shape, int *rearrange) {

        traceNew(16);

        int *ret = new int[length];
        for (int i = 0; i < length; i++) {
            ret[i] = shape[rearrange[i]];
        }
        return ret;
    }

/**
 * In-place permutation via cycle-chasing.
 * WARNING: destroys the contents of `rearrange` (each visited slot is reset
 * to its own index) - callers below always pass a disposable copy.
 *
 * @param length
 * @param shape
 * @param rearrange
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline void doPermuteSwap(int length, int **shape, int *rearrange) {
        int *shapeDeref = *shape;
        for (int i = 0; i < length; i++) {
            int x = shapeDeref[i];
            int j = i;
            //follow the permutation cycle starting at i
            while (1) {
                int k = rearrange[j];
                rearrange[j] = j;
                if (k == i)
                    break;
                shapeDeref[j] = shapeDeref[k];
                j = k;
            }
            shapeDeref[j] = x;
        }
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Copies shapeBuffer into `out`, then permutes `out` in place.
    inline void permuteShapeBufferInPlace(int *shapeBuffer,int *rearrange,int *out) {
        memcpy(out,shapeBuffer,sizeof(int) * shape::shapeInfoLength(shape::rank(shapeBuffer)));
        doPermuteShapeBuffer(shape::rank(shapeBuffer),out,rearrange);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Returns a permuted heap-allocated copy of shapeBuffer; caller owns it.
    inline int *permuteShapeBuffer(int *shapeBuffer,int *rearrange) {
        int len = shape::shapeInfoLength(shape::rank(shapeBuffer));
        int *copy = shape::copyOf(len,shapeBuffer);
        doPermuteShapeBuffer(copy,rearrange);
        return copy;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Permutes the shape and stride sections of shapeBuffer in place.
    // Copies of `rearrange` are made because doPermuteSwap destroys its input.
    inline void doPermuteShapeBuffer(int *shapeBuffer,int *rearrange) {
        int *shapeRef = shapeBuffer;
        //rank of the rearrange array == rank of shape buffer
        int rearrageRank = shape::rank(shapeRef);
        int *shape = shape::shapeOf(shapeRef);
        int *stride = shape::stride(shapeRef);
        int *rearrangeCopy1 = shape::copyOf(rearrageRank,rearrange);
        shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1);
        delete[] rearrangeCopy1;
        int *rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange);
        shape::doPermuteSwap(rearrageRank,&stride,rearrangeCopy2);
        delete[] rearrangeCopy2;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Same as above but reuses a caller-supplied scratch buffer instead of
    // allocating the rearrange copies.
    inline void doPermuteShapeBuffer(int *shapeBuffer,int *rearrange, int *tmpBuffer) {
        int *shapeRef = shapeBuffer;
        //rank of the rearrange array == rank of shape buffer
        int rearrageRank = shape::rank(shapeRef);
        int *shape = shape::shapeOf(shapeRef);
        int *stride = shape::stride(shapeRef);
        shape::copyOf(rearrageRank,rearrange, tmpBuffer);
        shape::doPermuteSwap(rearrageRank,&shape,tmpBuffer);
        shape::copyOf(rearrageRank,rearrange, tmpBuffer);
        shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Rank-supplied variant; also refreshes the order flag (last slot) from the
    // permuted shape/stride.
    inline void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange) {
        int *shapeRef = shapeBuffer;
        //rank of the rearrange array == rank of shape buffer
        int rearrageRank = rank;
        int *shape = shape::shapeOf(shapeRef);
        int *stride = shape::stride(shapeRef);
        int *rearrangeCopy1 = shape::copyOf(rearrageRank,rearrange);
        shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1);
        delete[] rearrangeCopy1;
        int *rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange);
        shape::doPermuteSwap(rearrageRank,&stride,rearrangeCopy2);
        shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
        delete[] rearrangeCopy2;
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Rank-supplied, scratch-buffer variant; also refreshes the order flag.
    inline void doPermuteShapeBuffer(int rank,int *shapeBuffer,int *rearrange, int *tmpBuffer) {
        int *shapeRef = shapeBuffer;
        //rank of the rearrange array == rank of shape buffer
        int rearrageRank = rank;
        int *shape = shape::shapeOf(shapeRef);
        int *stride = shape::stride(shapeRef);
        shape::copyOf(rearrageRank,rearrange, tmpBuffer);
        shape::doPermuteSwap(rearrageRank,&shape,tmpBuffer);
        shape::copyOf(rearrageRank,rearrange, tmpBuffer);
        shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);
        shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Builds the permutation that rotates the `dimensionLength` reduced axes to
    // the front: [delta..originalRank-1, 0..delta-1]. Caller owns the result.
    inline int *createPermuteIndexes(int originalRank,int *dimension,int dimensionLength) {
        int delta = originalRank - dimensionLength;

        traceNew(17);

        int *ret = new int[originalRank];
        for(int i = 0; i < delta; i++) {
            ret[i] = i + dimensionLength;
        }

        for(int i = delta; i < originalRank; i++) {
            ret[i] = i - delta;
        }

        return ret;
    }

/**
 * Get the ordering for the device
 * @param length
 * @param shape
 * @param stride
 * @param elementStride
 * @return
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline char getOrder(int length, int *shape, int *stride, int elementStride) {
        int sd = -1;
        int dim =
-1; int i = -1; int cContiguous = 1; int isFortran = 1; sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; if (stride[i] != sd) { cContiguous = 0; break; } /* contiguous, if it got this far */ if (dim == 0) { break; } sd *= dim; } /* check if fortran contiguous */ sd = elementStride; for (i = 0; i < length; ++i) { dim = shape[i]; if (stride[i] != sd) { isFortran = 0; } if (dim == 0) { break; } sd *= dim; } if (isFortran && cContiguous) return 'a'; else if (isFortran && !cContiguous) return 'f'; else if (!isFortran && !cContiguous) return 'c'; else return 'c'; } /** * Ensure that every value in the re arrange * array is unique * @param arr * @param shape * @param arrLength * @param shapeLength * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int checkArrangeArray(int *arr, int arrLength, int shapeLength) { if (arrLength != shapeLength) return -1; for (int i = 0; i < arrLength; i++) { if (arr[i] >= arrLength || arr[i] < 0) return -1; } for (int i = 0; i < arrLength; i++) { for (int j = 0; j < arrLength; j++) { if (i != j && arr[i] == arr[j]) return -1; } } return 1; } /** * Permute the shape information * @param info the shape information to permute * @param rearrange the order to re arrange * @param rank the rank of the rearrange array */ #ifdef __CUDACC__ __host__ __device__ #endif inline void permute(ShapeInformation **info, int *rearrange, int rank) { ShapeInformation *infoDeref = *info; checkArrangeArray(rearrange, rank, rank); shape::doPermuteSwap(rank, &infoDeref->shape, rearrange); shape::doPermuteSwap(rank, &infoDeref->stride, rearrange); char order = getOrder(rank, infoDeref->shape, infoDeref->stride, infoDeref->elementWiseStride); infoDeref->order = order; } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ #ifdef __CUDACC__ __host__ __device__ #endif inline int isVector(int *shape, int rank) { if (rank > 2) return 0; else if (rank <= 2) { if 
(shape[0] == 1 || shape[1] == 1) return 1; } return 0; } #ifdef __CUDACC__ __host__ __device__ #endif inline int isVector(int *shapeInfo) { return isVector(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } #ifdef __CUDACC__ __host__ __device__ #endif inline int oneDimEqualToLength(int *shape, int rank) { for(int i = 0; i < rank; i++) { if(shape[i] == shape::prod(shape,rank)) return 1; } return 0; } #ifdef __CUDACC__ __host__ __device__ #endif inline int oneDimEqualToLength(int *shapeInfo) { return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns whether the * given shape is a vector or not * @param shape the shape of the array * @param rank the rank of the shape */ #ifdef __CUDACC__ __host__ __device__ #endif inline int isMatrix(int *shape, int rank) { if (rank > 2) return 0; else if (rank <= 2) { if (shape[0] == 1 || shape[1] == 1) return 0; } return 1; } #ifdef __CUDACC__ __host__ __device__ #endif inline int isMatrix(int *shapeInfo) { return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo)); } /** * Returns the shape portion of an information * buffer */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *shapeOf(int *buffer) { return buffer + 1; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *copyOf(int length, int *toCopy) { traceNew(18); int *ret = new int[length]; return copyOf(length, toCopy, ret); } #ifdef __CUDACC__ __host__ __device__ #endif inline int *copyOf(int length, int *toCopy, int *ret) { memcpy(ret, toCopy, sizeof(int)*length); return ret; } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. */ #ifdef __CUDACC__ __host__ __device__ #endif inline void copyTo(int length, int *from, int *to) { memcpy(to, from, sizeof(int)*length); } /** * Return a copy of a buffer. * This buffer allocates memory * that must be freed elsewhere. 
*/
#ifdef __CUDACC__
    __host__ __device__
#endif
    // Gather copy: to[i] = from[indexes[i]]. Writes into a caller-owned buffer.
    inline void copyTo(int length, int *from, int *to, int *indexes) {
        for(int i = 0; i < length; i++) {
            to[i] = from[indexes[i]];
        }
    }

/**
 * Permute the given strides
 * in the given rearrange order
 * @param toPermute the buffer to permute
 * @param shapeRank the length of the buffer to permute
 * @param rearrange the rearrange order (must be 0 based indexes
 * and all must be filled in)
 * @return the rearranged array (heap-allocated; caller must delete[])
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *permutedStrides(int *toPermute, int shapeRank, int *rearrange) {
        int *strideCopy = copyOf(shapeRank, toPermute);
        checkArrangeArray(rearrange, shapeRank, shapeRank);
        int *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
        delete[] strideCopy;
        return newStride;
    }

/**
 * Return the slice (shape + 1 in pointer arithmetic)
 * @param shape the shape to take the slice of
 * @return the shape array - the first entry
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *slice(int *shape) {
        return shape + 1;
    }

/**
 * Returns the length of the
 * shape information buffer:
 * rank * 2 + 4
 * (rank, shape[rank], stride[rank], offset, elementWiseStride, order)
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int shapeInfoLength(int rank) {
        //FIXME magic numbers
        return rank * 2 + 4;
    }

/**
 * Returns the rank portion of
 * an information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int rank( int *buffer) {
        return buffer[0];
    }

/**
 * Converts a raw int buffer of the layout:
 * rank
 * shape
 * stride
 * offset
 * elementWiseStride
 *
 * where shape and stride are both straight int pointers
 *
 * NOTE: the returned struct's shape and stride pointers ALIAS `buffer`
 * (no copies are made); the struct itself is heap-allocated and owned by
 * the caller.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline ShapeInformation *infoFromBuffer(int *buffer) {

        traceNew(19);

        ShapeInformation *info = new ShapeInformation;
        int length = shapeInfoLength(rank(buffer));
        int rank = buffer[0];

        //start after rank
        info->shape = buffer + 1;
        info->stride = buffer + (1 + rank);
        info->rank = rank;
        info->offset = buffer[length - 3];
        info->elementWiseStride = buffer[length - 2];
        int *stride = buffer + 1 + rank;
        info->stride = stride;
        info->order = (char) buffer[length - 1];
        return info;
    }

/**
 * Returns the stride portion of an information
 * buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *stride( int *buffer) {
        return buffer + (1 + rank(buffer));
    }

/**
 * Compute the length of the given shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int length(int *shapeInfo) {
        return shape::prodLong(shape::shapeOf(shapeInfo), shape::rank(shapeInfo));
    }

/***
 * Returns the offset
 * portion of an information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int offset(int *buffer) {
        return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
    }

/**
 * Returns the ordering
 * for this shape information buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline char order(int *buffer) {
        //FIXME magic numbers
        return (char) buffer[(buffer[0] * 2 + 4) - 1];
    }

/**
 * Returns the element wise stride for this information
 * buffer
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int elementWiseStride(int *buffer) {
        return buffer[shapeInfoLength(buffer[0]) - 2];
    }

/**
 * Returns the element wise stride for this information
 * buffer relative to a dimension and reduction index
 *
 * NOTE(review): the 'f' and 'c' branches below are textually identical in the
 * dimensionLength > 1 case, and nearly so in the single-dimension case -
 * presumably one of them was meant to differ; confirm before consolidating.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int reductionIndexElementWiseStride(int *buffer, int *dimension, int dimensionLength) {
        if(dimensionLength > 1) {
            if(shape::order(buffer) == 'f') {
                /**
                 * The element wise stride belongs to a reduction index.
                 * When used out of order, we can get rid of the data
                 * dependencies and rely on using the max dimension
                 * specified for stride instead.
                 * Say we take the sum(0,1) along arr
                 * we can use arr.stride(1) as a representation
                 * along which to iterate.
                 */
                if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
                    int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
                    return tadElementWiseStride;
                }

                return 1;

            }
            else {
                /**
                 * The element wise stride belongs to a reduction index.
                 * When used out of order, we can get rid of the data
                 * dependencies and rely on using the max dimension
                 * specified for stride instead.
                 * Say we take the sum(0,1) along arr
                 * we can use arr.stride(1) as a representation
                 * along which to iterate.
                 */
                if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
                    int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
                    return tadElementWiseStride;
                }

                return 1;
            }
        }
        else {
            if(shape::order(buffer) == 'f') {
                /**
                 * The element wise stride belongs to a reduction index.
                 * When used out of order, we can get rid of the data
                 * dependencies and rely on using the max dimension
                 * specified for stride instead.
                 * Say we take the sum(0,1) along arr
                 * we can use arr.stride(1) as a representation
                 * along which to iterate.
                 */
                int tadElementWiseStride = shape::stride(buffer)[dimension[0]];
                return tadElementWiseStride;
            }
            else {
                /**
                 * The element wise stride belongs to a reduction index.
                 * When used out of order, we can get rid of the data
                 * dependencies and rely on using the max dimension
                 * specified for stride instead.
                 * Say we take the sum(0,1) along arr
                 * we can use arr.stride(1) as a representation
                 * along which to iterate.
                 */
                int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
                return tadElementWiseStride;
            }
        }
    }

/**
 * Returns whether
 * the given shape info buffer
 * represents a scalar shape
 * (rank 1 with extent 1, or rank 2 with both extents 1)
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int isScalar(int *info) {
        if (shape::rank(info) > 2)
            return 0;
        if (shape::rank(info) == 1)
            return shape::shapeOf(info)[0] == 1;
        else if (rank(info) == 2) {
            return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1;
        }
        return 0;
    }

/**
 * Returns whether
 * the given shape information
 * represents a scalar
 * shape or not
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int isScalar(volatile ShapeInformation *info) {
        if (info->rank > 2)
            return 0;
        if (info->rank == 1)
            return info->shape[0] == 1;
        else if (info->rank == 2) {
            return info->shape[0] == 1 && info->shape[1] == 1;
        }
        return 0;
    }

/**
 * Return a copy of this array with the
 * given index omitted
 *
 * @param data  the data to copy
 * @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the data array
 * @return the new array with the omitted
 *
 * item
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    // Writes the surviving entries into caller-owned `ret`
    // (expected capacity: dataLength - indexesLength).
    inline void removeIndex(int *data, int *indexes, int dataLength, int indexesLength, int *ret) {
        int count = 0;
        int absLength = dataLength - indexesLength;
        for (int i = 0; i < dataLength && count < absLength; i++) {
            int contains = 0;
            for (int j = 0; j < indexesLength; j++) {
                if (i == indexes[j]) {
                    contains = 1;
                    break;
                }
            }

            if (!contains) {
                ret[count] = data[i];
                count++;
            }
        }
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    // Returns every index in [begin, end) that is NOT in `indexes`.
    // Result is heap-allocated (length end - indexesLength); caller owns it.
    // NOTE(review): the allocation size assumes all of `indexes` fall inside
    // [begin, end) and begin == 0 per the comment below - confirm for other uses.
    inline int* everyIndexBut(int *indexes,int indexesLength,int begin,int end) {
        int len = end - indexesLength;

        traceNew(20);

        int *ret = new int[len];
        int retIdx = 0;
        //not here that we do 0 based indexing for end - this assumes things like:
        //0 to 4 are specified
        for(int i = begin; i < end ; i++) {
            bool found = false;
            for(int j = 0; j < indexesLength; j++) {
                if(indexes[j] == i) {
                    found = true;
                    break;
                }
            }

            if(!found) {
                ret[retIdx++] = i;
            }

        }

        return ret;

    }

/**
 * Computes the offset for accessing
 * a global element given the shape information
 * and the offset to be read.
 */
#ifdef __CUDACC__
    __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
        return offset + threadIdx.x * xInfo->elementWiseStride;

    }
#endif

/**
 * Returns a shape
 * forces the given length to be 2.
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape (heap-allocated 2-element array; caller owns it)
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *ensureVectorShape(int *shape, int dimension) {

        traceNew(21);

        int *ret = new int[2];

        if (dimension == 0) {
            ret[0] = 1;
            ret[1] = shape[0];
        } else {
            ret[0] = shape[0];
            ret[1] = 1;
        }

        return ret;
    }

/**
 * Returns a shape
 * forces the given length to be 2.
 * @param shape the shape to modify
 * @param dimension the dimension (row or column)
 * for the shape to be returned as
 * @return the new shape
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *ensureVectorShape(int *shape) {
        return ensureVectorShape(shape, 0);
    }

/**
 * Generate an int buffer
 * up to the given length
 * at the specified increment
 *
 * Caller owns the returned array.
 * NOTE(review): retLength truncates (diff / increment); when it is 0 a
 * 1-element array is returned with no values written - confirm callers
 * never hit that case.
 */
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline int *range(int from, int to, int increment) {
        int diff = nd4j::math::nd4j_abs<int>(from - to);
        int retLength = diff / increment;
        int *ret;

        traceNew(22);

        if(diff / increment < 1)
            ret = new int[1];
        else
            ret = new int[diff / increment];
        if (from < to) {
            int count = 0;
            for (int i = from; i < to; i += increment) {
                if (count >= retLength)
                    break;
                ret[count++] = i;
            }
        } else if (from > to) {
            int count = 0;
            //descending: starts at from - 1, mirroring the 0-based ascending case
            for (int i = from - 1; i >= to; i -= increment) {
                if (count >= retLength)
                    break;
                ret[count++] = i;
            }
        }

        return ret;
    }

/**
 * Generate a range
 * beginning at from and ending at to
 * incrementing by 1
 * @param from the start
 * @param to the end
 * @return the int array starting at from and ending at to
 */
#ifdef __CUDACC__
    __host__
__device__ #endif inline int *range(int from, int to) { return range(from, to, 1); } /** * Keep the given indexes in the data * @param data * @param index * @param indexLength * @param dataLength * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *keep(volatile int *data, int *index, int indexLength, int dataLength) { traceNew(23); int *ret = new int[indexLength]; int count = 0; for (int i = 0; i < dataLength; i++) { int contains = 0; for (int j = 0; j < indexLength; j++) { if (i == index[j]) { contains = 1; break; } } if (contains) ret[count++] = data[i]; } return ret; } /** * Generate a reverse * copy of the data */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *reverseCopy(int *data, int length) { if (length < 1) return nullptr; traceNew(24); int *copy = new int[length]; for (int i = 0; i <= length / 2; i++) { int temp = data[i]; copy[i] = data[length - i - 1]; copy[length - i - 1] = temp; } return copy; } #ifdef __CUDACC__ __host__ __device__ #endif inline void reverseCopyTo(int *from, int *to, int length) { if (length < 1) return; for (int i = 0; i <= length / 2; i++) { int temp = from[i]; to[i] = from[length - i - 1]; to[length - i - 1] = temp; } } #ifdef __CUDACC__ __host__ __device__ #endif inline void reverseCopyTo(int *from, int *to, int *indexes, int length) { if (length < 1) return; for (int i = 0; i <= length / 2; i++) { int temp = from[indexes[i]]; to[i] = from[indexes[length - i - 1]]; to[length - i - 1] = temp; } } /** * * @param arr1 * @param arr1Length * @param arr2 * @param arr2Length * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *concat(int *arr1, int arr1Length, int *arr2, int arr2Length) { traceNew(25); int *ret = new int[arr1Length + arr2Length]; std::memcpy(ret, arr1, arr1Length * sizeof(int)); std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(int)); return ret; } /** * * @param numArrays * @param numTotalElements * @param arr * @param lengths * @return */ #ifdef __CUDACC__ __host__ 
__device__ #endif inline int *concat(int numArrays, int numTotalElements, int **arr, int *lengths) { traceNew(26); int *ret = new int[numTotalElements]; int count = 0; #pragma omp simd for (int i = 0; i < numArrays; i++) { for (int j = 0; j < lengths[i]; j++) { ret[count++] = arr[i][j]; } } return ret; } /** * Get the length per slice of the * given shape and the dimension * @param rank the rank of the shape * @param shape the shape of to get * the length per slice for * @param dimension the dimension to * get the length per slice for * @param dimensionLength the length of the dimension array * @return the length per slice of the given shape * along the given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int lengthPerSlice(int rank, int *shape, int *dimension, int dimensionLength) { int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength); traceNew(27); int *ret2 = new int[absSelta]; removeIndex(shape, dimension, rank, dimensionLength, ret2); int length = rank - dimensionLength; int ret = prod(ret2, length); delete[] ret2; return ret; } /** * calculates the offset for a tensor * @param index * @param arr * @param tensorShape * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int sliceOffsetForTensor(int rank, int index, int *shape, int *tensorShape, int tensorShapeLength, int *dimension, int dimensionLength) { int tensorLength = prodLong(tensorShape, tensorShapeLength); int lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength); if (lengthPerSlice2 <= 0) { return 0; } int offset = index * tensorLength / lengthPerSlice2; return offset; } #ifdef __CUDACC__ /** * Computes the offset for accessing * a global element given the shape information * and the offset to be read. 
*/ __device__ int tadOffset(int *xInfo, int offset) { return offset + threadIdx.x * elementWiseStride(xInfo); } #endif /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tensorsAlongDimension(volatile int rank, volatile int length, volatile int *shape, int *dimension, int dimensionLength) { int *tensorShape = shape::keep(shape, dimension, dimensionLength, rank); int ret = length / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Computes the number * of tensors along * a given dimension */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength) { int *keepShape = shape::shapeOf(shapeInfo); int *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo)); int ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength); delete[] tensorShape; return ret; } /** * Get an offset for retrieval * from a data buffer * based on the given * shape stride and given indices * @param baseOffset the offset to start from * @param shape the shape of the array * @param stride the stride of the array * @param indices the indices to iterate over * @return the double at the specified index */ #ifdef __CUDACC__ __host__ __device__ #endif int getOffset(int baseOffset, int *shape, int *stride, int *indices, int rank) { int offset = baseOffset; for(int i = 0; i < rank; i++) { if(indices[i] >= shape[i] && shape[i] != 1) { printf("Index %d [%d] must not be >= shape[%d].\n", i,indices[i],shape[i]); return -1; } if(shape[i] != 1) { offset += (int) indices[i] * stride[i]; } } return offset; } /** * Returns the tensor along dimension * for the given block index * @param blockSize * @param blockIdx * @param i * @return */ #ifdef __CUDACC__ __device__ __host__ #endif inline int tadForBlockIndex(int blockSize, int blockIdx, int i) { return blockIdx + i * blockSize; } /** * 
Computes the number of tads per block * */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tadsPerBlock(int blockSize, int tads) { return nd4j::math::nd4j_ceil<double>(tads / (double) blockSize); } /** * Returns a shape buffer * for the shape information metadata. */ #ifdef __CUDACC__ __host__ __device__ #endif inline int *toShapeBuffer( ShapeInformation *info) { traceNew(29); int *ret = new int[shapeInfoLength(info->rank)]; int count = 1; int rank = info->rank; ret[0] = info->rank; #pragma omp simd for (int i = 0; i < rank; i++) { ret[count++] = info->shape[i]; } #pragma omp simd for (int i = 0; i < rank; i++) { ret[count++] = info->stride[i]; } ret[count++] = info->offset; ret[count++] = info->elementWiseStride; ret[count++] = info->order; return ret; } #ifdef __CUDACC__ __host__ __device__ #endif void printIntArray(int *arr,int length) { for(int i = 0; i < length; i++) { printf(" %d ",arr[i]); } printf("\n"); } #ifdef __CUDACC__ __host__ __device__ #endif void printShapeInfo(int *shapeInfo) { int rank = shape::rank(shapeInfo); int *shape = shape::shapeOf(shapeInfo); printf("Rank %d\n",rank); printf("Shape:\n"); for(int i = 0; i < rank; i++) { printf(" %d ",shape[i]); } printf("\n"); int *stride = shape::stride(shapeInfo); printf("Stride:\n"); for(int i = 0; i < rank; i++) { printf(" %d ",stride[i]); } printf("\n"); printf("Order %c\n",shape::order(shapeInfo)); } #ifdef __CUDACC__ __host__ __device__ #endif void printShapeInfoLinear(int *shapeInfo) { int rank = shape::rank(shapeInfo); printf("ShapeInfo: ["); for (int i = 0; i < rank * 2 + 4; i++) { printf("%i, ", shapeInfo[i]); } printf("]\n"); } #ifdef __CUDACC__ __host__ __device__ #endif inline void printArray(float *arr,int length) { printf("Array: ["); for (int i = 0; i < length; i ++) { printf("%f", arr[i]); if (i + 1 < length) printf(", "); } printf("]\n"); } /** * Given an linear index, element wise stride * and the length of each tad * map a linear index to a tad * @param i the index to map * 
@param the element wise stride for the tads * @param numElementsPerTad the number of elements * per tad */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tadIndex(int i, int elementWiseStride, int numElementsPerTad) { return i / (numElementsPerTad * elementWiseStride); } /** * Map a tad to a * reduction index. * @param tadIndexForOriginal the original tad index for the * split up problem (eg: split is dimension 3 mapping to a 2,3 problem) * @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3) * @param tadsForOriginal the number of tads for the smaller problem (eg: 3) */ #ifdef __CUDACC__ __host__ __device__ #endif inline int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced, int tadsForOriginal) { if (tadIndexForOriginal == 0) return 0; return tadIndexForOriginal / (tadsForOriginal / tadsForReduced); } /** * Tad index for linear * @param linearIndex * @param tadLength * @return */ #ifdef __CUDACC__ __host__ __device__ #endif inline int tadIndexForLinear(int linearIndex, int tadLength) { return linearIndex % tadLength; } /** * Computes the number of tads * per reduce index for the * reduction tad. 
*/ #ifdef __CUDACC__ __host__ __device__ #endif inline int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) { return tadsForOriginal / tadsForReduce; } /** * Maps a linear index to a reduction index * @param i the linear index to map * @param elementWiseStride the element wise stride * for the multiple problem * @param tadNum the number of tads for the shrunken problem * @param originalTadNum the tad number for the reduced version of the problem */ #ifdef __CUDACC__ __host__ __device__ #endif inline int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad, int tadNum, int originalTadNum) { int tad = tadIndex(i, elementWiseStride, numElementsPerTad); return reductionIndexForTad(tad, tadNum, originalTadNum); } #ifdef __CUDACC__ __host__ __device__ #endif inline int* createScalarShapeInfo() { traceNew(30); int *shape = new int[2]; shape[0] = 1; shape[1] = 1; int *stride = new int[2]; stride[0] = 1; stride[1] = 1; ShapeInformation *shapeInformation2 = new ShapeInformation(); shapeInformation2->rank = 2; shapeInformation2->offset = 0; shapeInformation2->stride = stride; shapeInformation2->shape = shape; shapeInformation2->elementWiseStride = 1; int *ret = shape::toShapeBuffer(shapeInformation2); delete shapeInformation2; return ret; } #ifdef __CUDACC__ __host__ __device__ #endif inline int* createScalarShapeInfo(int *ret) { ret[0] = 2; ret[1] = 1; ret[2] = 1; ret[3] = 1; ret[4] = 1; ret[5] = 0; ret[6] = 1; ret[7] = 99; return ret; } /** * Returns the prod of the data * up to the given length */ #ifdef __CUDACC__ __host__ __device__ #endif inline int prod(int *data, int length) { int prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } /** * Returns the prod of the data * up to the given length */ #ifdef __CUDACC__ __host__ __device__ #endif inline int prodLong( int *data, int length) { int prod = 1; for (int i = 0; i < length; i++) { prod *= data[i]; } return prod; } #ifdef __CUDACC__ __host__ __device__ #endif 
inline int rearMostLeftOverItem(int *data, int *dimension,int dimensionLength) { int *stride = shape::stride(data); //corner case: return the final item when its greater than the max, since its guaranteed to be left over //note here that strides are interpreted in reverse for tad //start from the front rather than the back int rank = shape::rank(data); if(shape::order(data) == 'f') { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } else { int dimIdx = dimensionLength - 1; for(int i = rank - 1; i >= 0; i--) { /** * Needs to find an algorithm such that: * looping backwards will find the highest dimension left * that isn't included in the dimension index list. * * This can also be thought of as the last item of the first index * of the difference between the full list of indices and * the dimension indices. * * We should avoid excessive object creation by only looping backwards. */ if(dimension[dimIdx--] != i) { int ret = stride[i]; return ret; } } } int ret = stride[0]; return ret; } #ifdef __CUDACC__ __device__ inline void sweepShapeInfoBuffer(int *shapeInfoBuffer, int *targetBuffer) { // we read first element, to find out length of our shapeInfoBuffer int rank = shapeInfoBuffer[0]; int len = rank * 2 + 4; for (int i = threadIdx.x; i < len; i += blockDim.x) targetBuffer[i] = shapeInfoBuffer[i]; } #endif } #endif /* SHAPE_H_ */
kncmbpush3.c
/* KNC C Library for Skeleton 3D Electromagnetic Vector PIC Code */ /* written by Viktor K. Decyk, UCLA and Ricardo Fonseca, ISCTE */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include <math.h> #include <string.h> #include <immintrin.h> #include "kncmbpush3.h" /*--------------------------------------------------------------------*/ void ckncgbppush3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], float qbm, float dt, float dtc, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1,int ipbc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with magnetic field. Using the Boris Mover. OpenMP/vector version using guard cells data read in tiles particles stored segmented array 190 flops/particle, 1 divide, 54 loads, 6 stores input: all, output: ppart, ek velocity equations used are: vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - (om*dt/2)**2 + 
2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and omz = (q/m)*bz(x(t),y(t),z(t)). position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt z(t+dt)=z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = velocity vx of particle n in tile m ppart[m][4][n] = velocity vy of particle n in tile m ppart[m][5][n] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic = number of particles per tile qbm = particle 
charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + .25*(vz(t+dt/2) + vz(t-dt/2))**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float qtmh, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm; __m512 v_qtmh, v_dt, v_dtc, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk; 
__attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \ vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \ omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1, \ v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp, \ v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy, \ 
v_oz,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ll = (mz < nz-loff ? mz : nz-loff) + 1; nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] 
*/ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = 
_mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), 
&sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] 
field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 
4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); 
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), 
&sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = _mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), 
&sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 
4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c 
= _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = 
_mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* time-centered kinetic energy */ /* sum1 += (acx*acx + acy*acy + acz*acz); */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* calculate cyclotron frequency */ /* omxt = qtmh*ox; */ /* omyt = qtmh*oy; */ /* omzt = qtmh*oz; */ e = _mm512_mul_ps(v_qtmh,v_ox); f = _mm512_mul_ps(v_qtmh,v_oy); g = _mm512_mul_ps(v_qtmh,v_oz); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new velocity */ /* vx = dx + (rot1*acx + rot2*acy 
+ rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* new position */ /* dx = x + vx*dtc; */ /* dy = y + vy*dtc; */ /* dz = z + vz*dtc; */ v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* vz = -vz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); 
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new velocity */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + 
dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* new position */ dx = x + vx*dtc; dy = y + vy*dtc; dz = z + vz*dtc; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; vz = -vz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new velocity */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; } /* sum2 += sum1; */ _mm512_store_pd(&dd[0],v_sum1); 
for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); } /* normalize kinetic energy */ *ek += 0.5f*sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgbppushf3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float dtc, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with magnetic field. Using the Boris Mover. also determines list of particles which are leaving this tile OpenMP/vector version using guard cells data read in tiles particles stored segmented array 190 flops/particle, 1 divide, 54 loads, 6 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc velocity equations used are: vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - 
(om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and omz = (q/m)*bz(x(t),y(t),z(t)). position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt z(t+dt)=z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = velocity vx of particle n in tile m ppart[m][4][n] = velocity vy of particle n in tile m ppart[m][5][n] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic[l] = number of particles in tile l 
ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 + (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float qtmh, dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm; __m512 v_qtmh,
v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; anx = (float) nx; any = (float) ny; anz = (float) nz; sum2 = 0.0; /* set boundary values */ v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_anx = _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \ y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \ omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \ edgelx,edgely,edgelz,edgerx,edgery,edgerz,sum1,v_noff,v_moff,v_loff, \ 
v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy, \ v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz,v_at,v_edgelx, \ v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d,v_sum1,a,b,c,d,e,f,g, \ h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* load local fields from global array */ nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = 
fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = 
_mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = 
kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = 
kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where 
mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = 
_mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), 
&sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = 
_mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = 
_mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = 
kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); 
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + 
dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* time-centered kinetic energy */ /* sum1 += (acx*acx + acy*acy + acz*acz); */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* calculate cyclotron frequency */ /* omxt = qtmh*ox; */ /* omyt = qtmh*oy; */ /* omzt = qtmh*oz; */ e = _mm512_mul_ps(v_qtmh,v_ox); f = _mm512_mul_ps(v_qtmh,v_oy); g = _mm512_mul_ps(v_qtmh,v_oz); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = 
_mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new velocity */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* new position */ /* dx = x + vx*dtc; */ /* dy = y + vy*dtc; */ /* dz = z + vz*dtc; */ v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z); /* find particles going out of bounds */ /* mm = 0; */ v_mm = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* mm = 2; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for 
any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) v_dx = v_x; } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dx = v_x; } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* mm += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) v_dy = v_x; } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it 
= _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dy = v_x; } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* mm += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) v_dz = v_x; } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* mm += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* mm += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dz = v_x; } } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); 
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new velocity */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(kk,v_mm); for (i = 0; i < 16; i++) { mm = kk[i]; if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + 
amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* new position */ dx = x + vx*dtc; dy = y + vy*dtc; dz = z + vz*dtc; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= 
edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new velocity */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } /* sum2 += sum1; */ _mm512_store_pd(&dd[0],v_sum1); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } /* normalize kinetic energy */ *ek += 0.5f*sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgrbppush3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, for relativistic particles with magnetic field Using the Boris Mover. 
OpenMP/vector version using guard cells data read in tiles particles stored segmented array 202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores input: all, output: ppart, ek momentum equations used are: px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t))*gami, omy = (q/m)*by(x(t),y(t),z(t))*gami, omz = (q/m)*bz(x(t),y(t),z(t))*gami, where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci) position equations used are: x(t+dt) = x(t) + px(t+dt/2)*dtg y(t+dt) = y(t) + py(t+dt/2)*dtg z(t+dt) = z(t) + pz(t+dt/2)*dtg where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+ pz(t+dt/2)*pz(t+dt/2))*ci*ci) fx(x(t),y(t),z(t)), 
fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = momentum px of particle n in tile m ppart[m][4][n] = momentum py of particle n in tile m ppart[m][5][n] = momentum pz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic = number of particles per tile qbm = particle charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations ci = reciprocal of velocity of light kinetic energy/mass at time t is also calculated, using ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. 
+ gami) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float qtmh, ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg; float x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm; __m512 v_qtmh, v_ci2, v_dt, v_dtc, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* 
__attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_ci2 = _mm512_set1_ps(ci2); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \ vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \ omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2, \ gami,qtmg,dtg,sum1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x, \ v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx, \ v_vy,v_vz,v_ox,v_oy,v_oz,v_gami,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r, \ s,msk,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = 
_mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ll = (mz < nz-loff ? mz : nz-loff) + 1; nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = 
_mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = 
_mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 
4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); 
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), 
&sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = _mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), 
&sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 
4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c 
= _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = 
_mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* find inverse gamma */ /* p2 = acx*acx + acy*acy + acz*acz; */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* full accuracy calculation */ v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_gami = _mm512_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* time-centered kinetic energy */ /* sum1 += gami*p2/(1.0f + gami); */ v_at = _mm512_mul_ps(v_gami,v_at); v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami)); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* renormalize magnetic field */ /* qtmg = qtmh*gami; */ v_at = _mm512_mul_ps(v_qtmh,v_gami); /* calculate cyclotron frequency */ /* omxt = qtmg*ox; */ /* omyt = qtmg*oy; */ /* omzt = qtmg*oz; */ e = _mm512_mul_ps(v_at,v_ox); f = _mm512_mul_ps(v_at,v_oy); g = _mm512_mul_ps(v_at,v_oz); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = 
_mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new momentum */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* update inverse gamma */ /* p2 = vx*vx + vy*vy + vz*vz; */ v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx)); v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at); /* dtg = dtc/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_mul_ps(v_dtc,v_at); */ /* full accuracy calculation */ v_at = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_at = _mm512_div_ps(v_dtc,v_at); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_div_ps(v_dtc,v_at); */ /* new position */ /* dx = x + vx*dtg; */ /* dy = y + vy*dtg; */ /* dz = z + vz*dtg; */ v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = 
_mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* vz = -vz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new momentum */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = 
ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* find inverse gamma */ p2 = acx*acx + acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0f + 
gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* update inverse gamma */ p2 = vx*vx + vy*vy + vz*vz; dtg = dtc/sqrtf(1.0f + p2*ci2); /* new position */ dx = x + vx*dtg; dy = y + vy*dtg; dz = z + vz*dtg; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; vz = -vz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new momentum */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; } /* sum2 += sum1; */ _mm512_store_pd(&dd[0],v_sum1); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); } /* normalize kinetic energy */ *ek += sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgrbppushf3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 
3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, for relativistic particles with magnetic field Using the Boris Mover. also determines list of particles which are leaving this tile OpenMP/vector version using guard cells data read in tiles particles stored segmented array 202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc momentum equations used are: px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t))*gami, omy = (q/m)*by(x(t),y(t),z(t))*gami, omz = (q/m)*bz(x(t),y(t),z(t))*gami, 
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci) position equations used are: x(t+dt) = x(t) + px(t+dt/2)*dtg y(t+dt) = y(t) + py(t+dt/2)*dtg z(t+dt) = z(t) + pz(t+dt/2)*dtg where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+ pz(t+dt/2)*pz(t+dt/2))*ci*ci) fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = momentum px of particle n in tile m ppart[m][4][n] = momentum py of particle n in tile m ppart[m][5][n] = momentum pz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio 
dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations ci = reciprocal of velocity of light kinetic energy/mass at time t is also calculated, using ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 + (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 + (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)/(1. + gami) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg; float qtmh, ci2, x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm; __m512 v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx,
v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_qtmh, v_ci2, v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; anx = (float) nx; any = (float) ny; anz = (float) nz; sum2 = 0.0; /* set boundary values */ v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_ci2 = _mm512_set1_ps(ci2); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_anx = _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \ y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \ omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \ edgelx,edgely,edgelz,edgerx,edgery,edgerz,p2,gami,qtmg,dtg,sum1,v_noff, \ v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp, \ v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz, \ 
v_gami,v_at,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d, \ v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* load local fields from global array */ nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; 
sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, 
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); 
_mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm 
= kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* 
perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); 
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = _mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 
4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = 
_mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); 
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* find inverse gamma */ /* p2 = acx*acx + acy*acy + acz*acz; */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* full accuracy calculation */ v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_gami = _mm512_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* time-centered kinetic energy */ /* sum1 += gami*p2/(1.0f + gami); */ v_at = _mm512_mul_ps(v_gami,v_at); v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami)); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* renormalize magnetic field */ /* qtmg = qtmh*gami; */ v_at = _mm512_mul_ps(v_qtmh,v_gami); /* calculate cyclotron frequency */ /* omxt = qtmg*ox; */ /* omyt = qtmg*oy; */ /* omzt = qtmg*oz; */ e = _mm512_mul_ps(v_at,v_ox); f = _mm512_mul_ps(v_at,v_oy); g = _mm512_mul_ps(v_at,v_oz); /* calculate rotation matrix 
*/ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new momentum */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* update inverse gamma */ /* p2 = vx*vx + vy*vy + vz*vz; */ v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx)); v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at); /* dtg = dtc/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_mul_ps(v_dtc,v_at); */ /* full accuracy calculation */ v_at = 
_mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_at = _mm512_div_ps(v_dtc,v_at); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_div_ps(v_dtc,v_at); */ /* new position */ /* dx = x + vx*dtg; */ /* dy = y + vy*dtg; */ /* dz = z + vz*dtg; */ v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z); /* find particles going out of bounds */ /* mm = 0; */ v_mm = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* mm = 2; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) v_dx = v_x; } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = 
_mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dx = v_x; } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* mm += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) v_dy = v_x; } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dy = v_x; } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* mm += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if 
test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) v_dz = v_x; } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* mm += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* mm += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dz = v_x; } } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new momentum */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(kk,v_mm); for (i = 0; i < 16; i++) { mm = kk[i]; if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { 
nh = 1; } } } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* find inverse gamma */ p2 = acx*acx + 
acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0f + gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* update inverse gamma */ p2 = vx*vx + vy*vy + vz*vz; dtg = dtc/sqrtf(1.0f + p2*ci2); /* new position */ dx = x + vx*dtg; dy = y + vy*dtg; dz = z + vz*dtg; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new momentum */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih 
+= 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*l)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*l)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
/* sum2 += sum1; */
/* reduce the 8 double-precision partial energies in v_sum1 to a scalar */
      _mm512_store_pd(&dd[0],v_sum1);
      for (j = 1; j < 8; j++) {
         dd[0] += dd[j];
      }
      sum2 += (sum1 + dd[0]);
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*l] = ih;
   }
/* normalize kinetic energy */
   *ek += sum2;
   return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgppost3lt(float ppart[], float q[], int kpic[], float qm,
                   int nppmx, int idimp, int mx, int my, int mz,
                   int nxv, int nyv, int nzv, int mx1, int my1,
                   int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   33 flops/particle, 11 loads, 8 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
   q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
   q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
   q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
   q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
   q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
   q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
   q(n+1,m+1,l+1)=qm*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   q[l][k][j] = charge density at grid point j,k,l
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 6
   mx/my/mz = number of grids in sorting cell in x/y/z
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   nzv = third dimension of charge array, must be >= nz+1
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mxyz1 = mx1*my1*mz1,
   where mz1 = (system length in z direction - 1)/mz + 1
   requires KNC, ppart needs to be 64 byte aligned
   nppmx needs to be a multiple of 16
local data */
#define MXV 17
#define MYV 17
#define MZV 17
   int mxy1, noff, moff, loff, npoff, npp, nps;
   int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
   float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
   __m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
   __m512i v_nn, v_mm, v_ll, v_it;
   __m512 v_qm, v_one;
   __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
   __m512 v_dx1, v_as, v_at;
   __m512 a, b, c, d, e, f, g, h, qp, qr;
   __mmask16 msk, msks, v_m;
   __attribute__((aligned(64))) unsigned int kk[16];
   __attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
   mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
   mxv = mx + 1;
   myv = my + 1;
   mxyv = mxv*myv;
   nxyv = nxv*nyv;
   v_mxv = _mm512_set1_epi32(mxv);
   v_mxyv = _mm512_set1_epi32(mxyv);
   v_qm = _mm512_set1_ps(qm);
   v_one = _mm512_set1_ps(1.0f);
/* v_m is set in every lane except lane 0 (only the 1.0 in lane 0 fails */
/* the < 1.0 test); it is used below to skip the i = 0 guard element    */
   v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
                        1.);
   v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/* return; */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,a,b,c, \
d,e,f,g,h,qp,qr,msk,msks,kk,sq)
   for (l = 0; l < mxyz1; l++) {
/* decode linear tile index l into grid offsets noff/moff/loff */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      v_noff = _mm512_set1_epi32(noff);
      v_moff = _mm512_set1_epi32(moff);
      v_loff = _mm512_set1_epi32(loff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/* sq[j] = 0.0f; */
/* } */
      memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
         v_x = _mm512_load_ps(&ppart[j+npoff]);
         v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
         v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
         v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
         v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
         v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
         v_nn = _mm512_sub_epi32(v_nn,v_noff);
         v_mm = _mm512_sub_epi32(v_mm,v_moff);
         v_ll = _mm512_sub_epi32(v_ll,v_loff);
         v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
         v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
         v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
         v_amx = _mm512_sub_ps(v_qm,v_dxp);
         v_amy = _mm512_sub_ps(v_one,v_dyp);
         v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
         v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
         v_dyp = _mm512_mul_ps(v_amx,v_dyp);
         v_amx = _mm512_mul_ps(v_amx,v_amy);
         v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* d = dyp*amz; */
/* d = dx1*amz; */
         a = _mm512_mul_ps(v_amx,v_amz);
         b = _mm512_mul_ps(v_amy,v_amz);
         c = _mm512_mul_ps(v_dyp,v_amz);
         d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
         e = _mm512_mul_ps(v_amx,v_dzp);
         f = _mm512_mul_ps(v_amy,v_dzp);
         g = _mm512_mul_ps(v_dyp,v_dzp);
         h = _mm512_mul_ps(v_dx1,v_dzp);
         _mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz; */
/* y = sq[nn+1] + amy*amz; */
/* z = sq[nn+mxv] + dyp*amz; */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x; */
/* sq[nn+1] = y; */
/* sq[nn+mxv] = z; */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv; */
/* x = sq[mm] + amx*dzp; */
/* y = sq[mm+1] + amy*dzp; */
/* z = sq[mm+mxv] + dyp*dzp; */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x; */
/* sq[mm+1] = y; */
/* sq[mm+mxv] = z; */
/* sq[mm+1+mxv] = w; */
/* deposit charge for two particles at a time */
/* the masked loadunpack/packstore pairs do an unaligned             */
/* read-modify-write of two adjacent sq entries per particle; the    */
/* shuffle immediate 177 swaps adjacent 32-bit lanes so the partner  */
/* particle's weights land in its own lane pair                      */
         for (i = 0; i < 8; i++) {
/* first particle */
            mm = kk[2*i];
            msk = _mm512_int2mask(3<<(2*i));
            msks = _mm512_int2mask(2<<(2*i));
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)a,msks,
                   (__m512i)b,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)c,msks,
                   (__m512i)d,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
            mm = mm + mxyv;
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)e,msks,
                   (__m512i)f,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)g,msks,
                   (__m512i)h,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
/* second particle */
            mm = kk[2*i+1];
            msks = _mm512_int2mask(1<<(2*i));
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)b,msks,
                   (__m512i)a,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)d,msks,
                   (__m512i)c,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
            mm = mm + mxyv;
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)f,msks,
                   (__m512i)e,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)h,msks,
                   (__m512i)g,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
         }
      }
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         z = ppart[j+2*nppmx+npoff];
         nn = x;
         mm = y;
         ll = z;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         dzp = z - (float) ll;
         nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
         amz = 1.0f - dzp;
         dx1 = dxp*dyp;
         dyp = amx*dyp;
         amx = amx*amy;
         amy = dxp*amy;
/* deposit charge */
         x = sq[nn] + amx*amz;
         y = sq[nn+1] + amy*amz;
         z = sq[nn+mxv] + dyp*amz;
         w = sq[nn+1+mxv] + dx1*amz;
         sq[nn] = x;
         sq[nn+1] = y;
         sq[nn+mxv] = z;
         sq[nn+1+mxv] = w;
         mm = nn + mxyv;
         x = sq[mm] + amx*dzp;
         y = sq[mm+1] + amy*dzp;
         z = sq[mm+mxv] + dyp*dzp;
         w = sq[mm+1+mxv] + dx1*dzp;
         sq[mm] = x;
         sq[mm+1] = y;
         sq[mm+mxv] = z;
         sq[mm+1+mxv] = w;
      }
/* deposit charge to interior points in global array */
      nn = nxv - noff;
      nn = mx < nn ? mx : nn;
      mm = nyv - moff;
      mm = my < mm ? my : mm;
      ll = nzv - loff;
      ll = mz < ll ? mz : ll;
      nps = 16*(nn/16);
      for (k = 1; k < ll; k++) {
         for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* (note: the stride below is actually 16) */
/* for (i = 1; i < nn; i++) { */
/* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */
/* += sq[i+mxv*j+mxyv*k]; */
/* } */
            for (i = 0; i < nps; i+=16) {
               m = i + mxv*j + mxyv*k;
               v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
               v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
               m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
               v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
               v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 */
               if (i==0)
                  v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
               else
                  v_at = _mm512_add_ps(v_at,v_as);
               _mm512_packstorelo_ps(&q[m],v_at);
               _mm512_packstorehi_ps(&q[m+16],v_at);
            }
/* loop over remaining elements */
            m = 1 > nps ? 1 : nps;
            for (i = m ; i < nn; i++) {
               q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
               += sq[i+mxv*j+mxyv*k];
            }
         }
      }
/* deposit charge to edge points in global array */
/* atomics are used because guard-cell rows/planes of q are shared */
/* with updates coming from other tiles' threads                   */
      lm = nzv - loff;
      lm = mz+1 < lm ? mz+1 : lm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
            if (lm > mz) {
#pragma omp atomic
               q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[i+mxv*j+mxyv*(lm-1)];
            }
         }
      }
      nm = nxv - noff;
      nm = mx+1 < nm ? mx+1 : nm;
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (k = 0; k < ll; k++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
               += sq[i+mxv*(mm-1)+mxyv*k];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
               += sq[nm-1+mxv*j+mxyv*k];
            }
         }
      }
      if (lm > mz) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
               += sq[i+mxv*(mm-1)+mxyv*(lm-1)];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
            += sq[mxv*j+mxyv*(lm-1)];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[nm-1+mxv*j+mxyv*(lm-1)];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void cknc2gppost3lt(float ppart[], float q[], int kpic[], float qm,
                    int nppmx, int idimp, int mx, int my, int mz,
                    int nxv, int nyv, int nzv, int mx1, int my1,
                    int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   33 flops/particle, 11 loads, 8 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
   q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
   q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
   q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
   q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
   q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
   q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
   q(n+1,m+1,l+1)=qm*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z
of particle n in tile m q[l][k][j] = charge density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e nppmx = maximum number of particles in tile idimp = size of phase space = 6 mx/my/mz = number of grids in sorting cell in x/y/z nxv = first dimension of charge array, must be >= nx+1 nyv = second dimension of charge array, must be >= ny+1 nzv = third dimension of charge array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz; __m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv; __m512i v_nn, v_mm, v_ll, v_it; __m512 v_qm, v_one; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_as, v_at; __mmask16 v_m; __attribute__((aligned(64))) unsigned int kk[16]; typedef union vfloat {float v[16]; __m512 v16;} vf; __attribute__((aligned(64))) float sq[MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */ vf vv[8]; mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx + 1; myv = my + 1; mxyv = mxv*myv; nxyv = nxv*nyv; v_mxv = _mm512_set1_epi32(mxv); v_mxyv = _mm512_set1_epi32(mxyv); v_qm = _mm512_set1_ps(qm); v_one = _mm512_set1_ps(1.0f); v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., 1.); v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \ dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \ 
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,kk,sq,vv) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* zero out local accumulator */ /* for (j = 0; j < mxyv*(mz+1); j++) { */ /* sq[j] = 0.0f; */ /* } */ memset((void*)sq,0,mxyv*(mz+1)*sizeof(float)); nps = 16*(npp/16); /* vector loop over particles in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); /* dyp = y - (float) mm; */ v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); /* dzp = z - (float) ll; */ v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm)); v_nn = _mm512_add_epi32(v_nn,v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = 
_mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* x = amx*amz; */ /* y = amy*amz; */ /* z = dyp*amz; */ /* w = dx1*amz; */ vv[0].v16 = _mm512_mul_ps(v_amx,v_amz); vv[1].v16 = _mm512_mul_ps(v_amy,v_amz); vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz); vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz); vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp); vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp); vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp); vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp); _mm512_store_epi32(kk,v_nn); /* deposit charge */ /* x = sq[nn] + amx*amz; */ /* y = sq[nn+1] + amy*amz; */ /* z = sq[nn+mxv] + dyp*amz; */ /* w = sq[nn+1+mxv] + dx1*amz; */ /* sq[nn] = x; */ /* sq[nn+1] = y; */ /* sq[nn+mxv] = z; */ /* sq[nn+1+mxv] = w; */ /* mm = nn + mxyv; */ /* x = sq[mm] + amx*dzp; */ /* y = sq[mm+1] + amy*dzp; */ /* z = sq[mm+mxv] + dyp*dzp; */ /* w = sq[mm+1+mxv] + dx1*dzp; */ /* sq[mm] = x; */ /* sq[mm+1] = y; */ /* sq[mm+mxv] = z; */ /* sq[mm+1+mxv] = w; */ for (i = 0; i < 16; i++) { nn = kk[i]; x = sq[nn] + vv[0].v[i]; y = sq[nn+1] + vv[1].v[i]; z = sq[nn+mxv] + vv[2].v[i]; w = sq[nn+1+mxv] + vv[3].v[i]; sq[nn] = x; sq[nn+1] = y; sq[nn+mxv] = z; sq[nn+1+mxv] = w; mm = nn + mxyv; x = sq[mm] + vv[4].v[i]; y = sq[mm+1] + vv[5].v[i]; z = sq[mm+mxv] + vv[6].v[i]; w = sq[mm+1+mxv] + vv[7].v[i]; sq[mm] = x; sq[mm+1] = y; sq[mm+mxv] = z; sq[mm+1+mxv] = w; } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); amx = qm - dxp; amy = 1.0f - dyp; amz = 1.0f - dzp; dx1 = 
dxp*dyp; dyp = amx*dyp; amx = amx*amy; amy = dxp*amy; /* deposit charge */ x = sq[nn] + amx*amz; y = sq[nn+1] + amy*amz; z = sq[nn+mxv] + dyp*amz; w = sq[nn+1+mxv] + dx1*amz; sq[nn] = x; sq[nn+1] = y; sq[nn+mxv] = z; sq[nn+1+mxv] = w; mm = nn + mxyv; x = sq[mm] + amx*dzp; y = sq[mm+1] + amy*dzp; z = sq[mm+mxv] + dyp*dzp; w = sq[mm+1+mxv] + dx1*dzp; sq[mm] = x; sq[mm+1] = y; sq[mm+mxv] = z; sq[mm+1+mxv] = w; } /* deposit charge to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; nps = 16*(nn/16); for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */ /* += sq[i+mxv*j+mxyv*k]; */ /* } */ for (i = 0; i < nps; i+=16) { m = i + mxv*j + mxyv*k; v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]); v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]); m = i + noff + nxv*(j + moff) + nxyv*(k + loff); v_at = _mm512_loadunpacklo_ps(v_at,&q[m]); v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]); /* skip add for first element for i = 0 */ if (i==0) v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as); else v_at = _mm512_add_ps(v_at,v_as); _mm512_packstorelo_ps(&q[m],v_at); _mm512_packstorehi_ps(&q[m+16],v_at); } /* loop over remaining elements */ m = 1 > nps ? 1 : nps; for (i = m ; i < nn; i++) { q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[i+mxv*j+mxyv*k]; } } } /* deposit charge to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j]; if (lm > mz) { #pragma omp atomic q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[i+mxv*j+mxyv*(lm-1)]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? 
my+1 : mm;
/* guard-cell accumulation along tile edges; the omp atomics protect   */
/* against concurrent updates from neighboring tiles of the parallel   */
/* tile loop                                                           */
   for (k = 0; k < ll; k++) {
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
            += sq[i+mxv*(mm-1)+mxyv*k];
         }
      }
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
         if (nm > mx) {
#pragma omp atomic
            q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
            += sq[nm-1+mxv*j+mxyv*k];
         }
      }
   }
   if (lm > mz) {
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
            += sq[i+mxv*(mm-1)+mxyv*(lm-1)];
         }
      }
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[mxv*j+mxyv*(lm-1)];
         if (nm > mx) {
#pragma omp atomic
            q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
            += sq[nm-1+mxv*j+mxyv*(lm-1)];
         }
      }
   }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void ckncgjppost3lt(float ppart[], float cu[], int kpic[], float qm,
                    float dt, int nppmx, int idimp, int nx, int ny,
                    int nz, int mx, int my, int mz, int nxv, int nyv,
                    int nzv, int mx1, int my1, int mxyz1, int ipbc) {
/* for 3d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   69 flops/particle, 30 loads, 27 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
   cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
   cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
   cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
   cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
   cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
   cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
   cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   and qci = qm*vi, where i = x,y,z
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppart[m][3][n] = velocity vx of particle n in tile m
   ppart[m][4][n] = velocity vy of particle n in tile m
   ppart[m][5][n] = velocity vz of particle n in tile m
   cu[l][k][j][i] = ith component of current density at grid point j,k,l
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 6
   nx/ny/nz = system length in x/y/z direction
   mx/my/mz = number of grids in sorting cell in x/y/z
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   nzv = fourth dimension of current array, must be >= nz+1
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mxyz1 = mx1*my1*mz1,
   where mz1 = (system length in z direction - 1)/mz + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
   requires KNC, part needs to be 64 byte aligned
   nppmx needs to be a multiple of 16
   cu needs to have 4 components, although one is not used
local data                                                            */
#define MXV             17
#define MYV             17
#define MZV             17
   int mxy1, noff, moff, loff, npoff, npp, nps;
   int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
   float edgelx, edgely, edgelz, edgerx, edgery, edgerz;
   float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
   float x, y, z;
   __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
   __m512i v_nn, v_mm, v_ll, v_it, v_perm;
   __m512 v_qm, v_dt, v_one, v_zero;
   __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
   __m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
   __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
   __m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
   __m512 cp, cr;
   __mmask16 msk;
   __attribute__((aligned(64))) unsigned int kk[16];
   float scu[4*MXV*MYV*MZV];
/* float scu[4*(mx+1)*(my+1)*(mz+1)]; */
/* NOTE(review): v_perm, myv and m appear to be initialized/declared   */
/* but never referenced in this routine                                */
   mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
   mxv = mx+1; myv = my+1;
   mxyv = mxv*myv; nxyv = nxv*nyv;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 0.0f;
   edgelz = 0.0f;
   edgerx = (float) nx;
   edgery = (float) ny;
   edgerz = (float) nz;
   if (ipbc==2) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgelz = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
      edgerz = (float) (nz-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   v_mxv4 = _mm512_set1_epi32(4*mxv);
   v_mxyv4 = _mm512_set1_epi32(4*mxyv);
   v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
   v_qm = _mm512_set1_ps(qm);
   v_dt = _mm512_set1_ps(dt);
   v_one = _mm512_set1_ps(1.0f);
   v_zero = _mm512_setzero_ps();
   v_edgelx = _mm512_set1_ps(edgelx);
   v_edgely = _mm512_set1_ps(edgely);
   v_edgelz = _mm512_set1_ps(edgelz);
   v_edgerx = _mm512_set1_ps(edgerx);
   v_edgery = _mm512_set1_ps(edgery);
   v_edgerz = _mm512_set1_ps(edgerz);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/*    return;                                     */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \
vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,v_noff,v_moff,v_loff, \
v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz, \
v_dx1,v_vx,v_vy,v_vz,v_at,a,b,c,d,e,f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv, \
cp,cr,msk,kk,scu)
   for (l = 0; l < mxyz1; l++) {
/* decode tile index l into grid offsets (noff,moff,loff) */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      v_noff = _mm512_set1_epi32(noff);
      v_moff = _mm512_set1_epi32(moff);
      v_loff = _mm512_set1_epi32(loff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/*    scu[j] = 0.0f;                     */
/* } */
      memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
/* nps = number of particles handled by the full 16-wide vector loop */
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff]; */
/* y = ppart[j+nppmx+npoff]; */
/* z = ppart[j+2*nppmx+npoff]; */
         v_x = _mm512_load_ps(&ppart[j+npoff]);
         v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
         v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm; */
/* dzp = z - (float) ll; */
         v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
         v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dyp = _mm512_sub_ps(v_y,v_dyp);
         v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
         v_nn = _mm512_sub_epi32(v_nn,v_noff);
         v_mm = _mm512_sub_epi32(v_mm,v_moff);
         v_ll = _mm512_sub_epi32(v_ll,v_loff);
         v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
         v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
         v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp; */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
         v_amx = _mm512_sub_ps(v_qm,v_dxp);
         v_amy = _mm512_sub_ps(v_one,v_dyp);
         v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
         v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
         v_dyp = _mm512_mul_ps(v_amx,v_dyp);
         v_amx = _mm512_mul_ps(v_amx,v_amy);
         v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
         a = _mm512_mul_ps(v_amx,v_amz);
         b = _mm512_mul_ps(v_amy,v_amz);
         c = _mm512_mul_ps(v_dyp,v_amz);
         d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
         e = _mm512_mul_ps(v_amx,v_dzp);
         f = _mm512_mul_ps(v_amy,v_dzp);
         g = _mm512_mul_ps(v_dyp,v_dzp);
         h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/* vx = ppart[j+3*nppmx+npoff]; */
/* vy = ppart[j+4*nppmx+npoff]; */
/* vz = ppart[j+5*nppmx+npoff]; */
         v_vx = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
         v_vy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
         v_vz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
         v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit charge for one particle at a time */
/* kk holds the 16 per-particle byte offsets into scu */
         for (i = 0; i < 16; i++) {
            ii = i >> 2;
/* shuffle registers once per group of 4 particles */
            if (i==(ii<<2)) {
               switch (ii) {
                  case 0:
/* replicate velocities of first group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,0);
                     q = _mm512_permute4f128_ps(v_vy,0);
                     r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(a,
                         _mm512_int2mask(61680),b,177);
                     t = _mm512_mask_permute4f128_ps(c,
                         _mm512_int2mask(61680),d,177);
                     u = _mm512_mask_permute4f128_ps(e,
                         _mm512_int2mask(61680),f,177);
                     v = _mm512_mask_permute4f128_ps(g,
                         _mm512_int2mask(61680),h,177);
                     break;
                  case 1:
/* replicate velocities of second group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,85);
                     q = _mm512_permute4f128_ps(v_vy,85);
                     r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(b,
                         _mm512_int2mask(3855),a,177);
                     t = _mm512_mask_permute4f128_ps(d,
                         _mm512_int2mask(3855),c,177);
                     u = _mm512_mask_permute4f128_ps(f,
                         _mm512_int2mask(3855),e,177);
                     v = _mm512_mask_permute4f128_ps(h,
                         _mm512_int2mask(3855),g,177);
                     break;
                  case 2:
/* replicate velocities of third group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,170);
                     q = _mm512_permute4f128_ps(v_vy,170);
                     r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(a,
                         _mm512_int2mask(61680),b,177);
                     s = _mm512_permute4f128_ps(s,78);
                     t = _mm512_mask_permute4f128_ps(c,
                         _mm512_int2mask(61680),d,177);
                     t = _mm512_permute4f128_ps(t,78);
                     u = _mm512_mask_permute4f128_ps(e,
                         _mm512_int2mask(61680),f,177);
                     u = _mm512_permute4f128_ps(u,78);
                     v = _mm512_mask_permute4f128_ps(g,
                         _mm512_int2mask(61680),h,177);
                     v = _mm512_permute4f128_ps(v,78);
                     break;
                  case 3:
/* replicate velocities of fourth group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,255);
                     q = _mm512_permute4f128_ps(v_vy,255);
                     r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(b,
                         _mm512_int2mask(3855),a,177);
                     s = _mm512_permute4f128_ps(s,78);
                     t = _mm512_mask_permute4f128_ps(d,
                         _mm512_int2mask(3855),c,177);
                     t = _mm512_permute4f128_ps(t,78);
                     u = _mm512_mask_permute4f128_ps(f,
                         _mm512_int2mask(3855),e,177);
                     u = _mm512_permute4f128_ps(u,78);
                     v = _mm512_mask_permute4f128_ps(h,
                         _mm512_int2mask(3855),g,177);
                     v = _mm512_permute4f128_ps(v,78);
                     break;
               }
            }
            v_it = _mm512_setzero_epi32();
            switch (i-(ii<<2)) {
/* first particle */
               case 0:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
                         _mm512_int2mask(170),(__m512i)q,177);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
                         _mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,0);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,0);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,0);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,0);
                  break;
/* second particle */
               case 1:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
                         _mm512_int2mask(85),(__m512i)p,177);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
                         _mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,85);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,85);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,85);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,85);
                  break;
/* third particle */
               case 2:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
                         _mm512_int2mask(170),(__m512i)q,177);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
                         _mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,170);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,170);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,170);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,170);
                  break;
/* fourth particle */
               case 3:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
                         _mm512_int2mask(85),(__m512i)p,177);
                  v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
                         _mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,255);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,255);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,255);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,255);
                  break;
            }
            _mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz; */
/* dy = amy*amz; */
/* scu[nn] += vx*dx;   */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* dx = dyp*amz; */
/* scu[nn+4] += vx*dy;   */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
            mm = kk[i];
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[mm]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[mm+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*amz; */
/* dy = dx1*amz; */
/* scu[mm] += vx*dx;   */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy;   */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
            mm = kk[i] + 4*mxv;
            cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
                 &scu[mm]);
            cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
                 &scu[mm+16]);
            cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
            _mm512_store_epi32(kk,v_ll);
/* nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*dzp; */
/* dy = amy*dzp; */
/* scu[nn] += vx*dx;   */
/* scu[nn+1] += vy*dx; */
/* scu[nn+2] += vz*dx; */
/* scu[nn+4] += vx*dy;   */
/* scu[nn+1+4] += vy*dy; */
/* scu[nn+2+4] += vz*dy; */
            mm = kk[i];
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[mm]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[mm+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/* mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dx = dyp*dzp; */
/* dy = dx1*dzp; */
/* scu[mm] += vx*dx;   */
/* scu[mm+1] += vy*dx; */
/* scu[mm+2] += vz*dx; */
/* scu[mm+4] += vx*dy;   */
/* scu[mm+1+4] += vy*dy; */
/* scu[mm+2+4] += vz*dy; */
            mm = kk[i] + 4*mxv;
            cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
                 &scu[mm]);
            cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
                 &scu[mm+16]);
            cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
         }
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
         v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
         v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
         v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* reflecting boundary conditions */
         if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/*    dx = x;                             */
/*    ppart[j+3*nppmx+npoff] = -vx;       */
/* } */
            msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
                  _MM_CMPINT_GE));
            v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
            v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/*    dy = y;                             */
/*    ppart[j+4*nppmx+npoff] = -vy;       */
/* } */
            msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
                  _MM_CMPINT_GE));
            v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
            v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/*    dz = z;                             */
/*    ppart[j+5*nppmx+npoff] = -vz;       */
/* } */
            msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
                  _MM_CMPINT_GE));
            v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
            v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/*    dx = x;                             */
/*    ppart[j+3*nppmx+npoff] = -vx;       */
/* } */
            msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
                  _MM_CMPINT_GE));
            v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
            v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/* if ((dy < edgely) || (dy >= edgery)) { */
/*    dy = y;                             */
/*    ppart[j+4*nppmx+npoff] = -vy;       */
/* } */
            msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
                  _MM_CMPINT_GE));
            v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
            v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
         }
/* set new position */
/* ppart[j+npoff] = dx;         */
/* ppart[j+nppmx+npoff] = dy;   */
/* ppart[j+2*nppmx+npoff] = dz; */
         _mm512_store_ps(&ppart[j+npoff],v_dx);
         _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
         _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
      }
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         z = ppart[j+2*nppmx+npoff];
         nn = x;
         mm = y;
         ll = z;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         dzp = z - (float) ll;
         nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
         amx = qm - dxp;
         amy = 1.0f - dyp;
         dx1 = dxp*dyp;
         dyp = amx*dyp;
         amx = amx*amy;
         amz = 1.0f - dzp;
         amy = dxp*amy;
/* deposit current within tile to local accumulator */
         dx = amx*amz;
         dy = amy*amz;
         vx = ppart[j+3*nppmx+npoff];
         vy = ppart[j+4*nppmx+npoff];
         vz = ppart[j+5*nppmx+npoff];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = dyp*amz;
         scu[nn+4] += vx*dy;
         scu[nn+1+4] += vy*dy;
         scu[nn+2+4] += vz*dy;
         dy = dx1*amz;
         mm = nn + 4*mxv;
         scu[mm] += vx*dx;
         scu[mm+1] += vy*dx;
         scu[mm+2] += vz*dx;
         dx = amx*dzp;
         scu[mm+4] += vx*dy;
         scu[mm+1+4] += vy*dy;
         scu[mm+2+4] += vz*dy;
         dy = amy*dzp;
         nn += 4*mxyv;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = dyp*dzp;
         scu[nn+4] += vx*dy;
         scu[nn+1+4] += vy*dy;
         scu[nn+2+4] += vz*dy;
         dy = dx1*dzp;
         mm = nn + 4*mxv;
         scu[mm] += vx*dx;
         scu[mm+1] += vy*dx;
         scu[mm+2] += vz*dx;
         scu[mm+4] += vx*dy;
         scu[mm+1+4] += vy*dy;
         scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
         dz = z + vz*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+3*nppmx+npoff] = -vx;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+4*nppmx+npoff] = -vy;
            }
            if ((dz < edgelz) || (dz >= edgerz)) {
               dz = z;
               ppart[j+5*nppmx+npoff] = -vz;
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+3*nppmx+npoff] = -vx;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+4*nppmx+npoff] = -vy;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
         ppart[j+2*nppmx+npoff] = dz;
      }
/* deposit current to interior points in global array */
      nn = nxv - noff;
      nn = mx < nn ? mx : nn;
      mm = nyv - moff;
      mm = my < mm ? my : mm;
      ll = nzv - loff;
      ll = mz < ll ? mz : ll;
      for (k = 1; k < ll; k++) {
         for (j = 1; j < mm; j++) {
            for (i = 1; i < nn; i++) {
               cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[4*(i+mxv*j+mxyv*k)];
               cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[1+4*(i+mxv*j+mxyv*k)];
               cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[2+4*(i+mxv*j+mxyv*k)];
            }
         }
      }
/* deposit current to edge points in global array */
/* edge cells may be shared with neighboring tiles, hence the atomics */
      lm = nzv - loff;
      lm = mz+1 < lm ? mz+1 : lm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
            += scu[1+4*(i+mxv*j)];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
            += scu[2+4*(i+mxv*j)];
            if (lm > mz) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[2+4*(i+mxv*j+mxyv*(lm-1))];
            }
         }
      }
      nm = nxv - noff;
      nm = mx+1 < nm ? mx+1 : nm;
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (k = 0; k < ll; k++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
            += scu[1+4*(i+mxyv*k)];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
            += scu[2+4*(i+mxyv*k)];
            if (mm > my) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
            cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
            cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[2+4*(mxv*j+mxyv*k)];
            if (nm > mx) {
#pragma omp atomic
               cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
               cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
               cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[2+4*(nm-1+mxv*j+mxyv*k)];
            }
         }
      }
      if (lm > mz) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[2+4*(i+mxyv*(lm-1))];
            if (mm > my) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[2+4*(mxv*j+mxyv*(lm-1))];
            if (nm > mx) {
#pragma omp atomic
               cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void ckncgrjppost3lt(float ppart[], float cu[], int kpic[], float qm,
                     float dt, float ci, int nppmx, int idimp, int nx,
                     int ny, int nz, int mx, int my, int mz, int nxv,
                     int nyv, int nzv, int mx1, int my1, int mxyz1,
                     int ipbc) {
/* for 3d code, this subroutine calculates particle current density
   using first-order linear interpolation for relativistic particles
   in addition, particle positions are advanced a half time-step
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores
   input: all, output: ppart, cu
   current density is
approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = momentum vx of particle n in tile m ppart[m][4][n] = momentum vy of particle n in tile m ppart[m][5][n] = momentum vz of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, part needs to be 64 byte aligned nppmx needs to be a multiple of 16 cu needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv; float ci2, edgelx, edgely, edgelz, 
edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float p2, gami; float x, y, z, ux, uy, uz; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_it, v_perm; __m512 v_qm, v_ci2, v_dt, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_ux, v_uy, v_uz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv; __m512 cp, cr; __mmask16 msk; __attribute__((aligned(64))) unsigned int kk[16]; float scu[4*MXV*MYV*MZV]; /* float scu[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; ci2 = ci*ci; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qm = _mm512_set1_ps(qm); v_ci2 = _mm512_set1_ps(ci2); v_dt = _mm512_set1_ps(dt); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \ vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,p2,gami,v_noff, \ 
v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx, \ v_amy,v_amz,v_dx1,v_vx,v_vy,v_vz,v_ux,v_uy,v_uz,v_gami,v_at,a,b,c,d,e, \ f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,kk,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* zero out local accumulator */ /* for (j = 0; j < 4*mxyv*(mz+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* find inverse gamma */ /* ux = ppart[j+3*nppmx+npoff]; */ /* uy = ppart[j+4*nppmx+npoff]; */ /* uz = ppart[j+5*nppmx+npoff]; */ v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]); v_uy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]); v_uz = 
_mm512_load_ps(&ppart[j+5*nppmx+npoff]); /* p2 = ux*ux + uy*uy + uz*uz; */ v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux)); v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* full accuracy calculation */ v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_gami = _mm512_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* calculate weights */ /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* a = amx*amz; */ /* b = amy*amz; */ /* c = dyp*amz; */ /* d = dx1*amz; */ a = _mm512_mul_ps(v_amx,v_amz); b = _mm512_mul_ps(v_amy,v_amz); c = _mm512_mul_ps(v_dyp,v_amz); d = _mm512_mul_ps(v_dx1,v_amz); /* e = amx*dzp; */ /* f = amy*dzp; */ /* g = dyp*dzp; */ /* h = dx1*dzp; */ e = _mm512_mul_ps(v_amx,v_dzp); f = _mm512_mul_ps(v_amy,v_dzp); g = _mm512_mul_ps(v_dyp,v_dzp); h = _mm512_mul_ps(v_dx1,v_dzp); /* deposit current */ /* vx = ux*gami; */ /* vy = uy*gami; */ /* vz = uz*gami; */ v_vx = _mm512_mul_ps(v_ux,v_gami); v_vy = _mm512_mul_ps(v_uy,v_gami); v_vz = _mm512_mul_ps(v_uz,v_gami); v_ll = _mm512_add_epi32(v_nn,v_mxyv4); /* deposit charge for one particle at a 
time */ for (i = 0; i < 16; i++) { ii = i >> 2; if (i==(ii<<2)) { switch (ii) { case 0: /* replicate velocities of first group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,0); q = _mm512_permute4f128_ps(v_vy,0); r = _mm512_permute4f128_ps(v_vz,0); /* regroup weights for first group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, _mm512_int2mask(61680),b,177); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); break; case 1: /* replicate velocities of second group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,85); q = _mm512_permute4f128_ps(v_vy,85); r = _mm512_permute4f128_ps(v_vz,85); /* regroup weights for second group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, _mm512_int2mask(3855),a,177); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); break; case 2: /* replicate velocities of third group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,170); q = _mm512_permute4f128_ps(v_vy,170); r = _mm512_permute4f128_ps(v_vz,170); /* regroup weights for third group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, _mm512_int2mask(61680),b,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); v = _mm512_permute4f128_ps(v,78); break; case 3: /* replicate velocities of fourth group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,255); q = _mm512_permute4f128_ps(v_vy,255); r = _mm512_permute4f128_ps(v_vz,255); /* regroup weights for fourth group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, 
_mm512_int2mask(3855),a,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); v = _mm512_permute4f128_ps(v,78); break; } } v_it = _mm512_setzero_epi32(); switch (i-(ii<<2)) { /* first particle */ case 0: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,78); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,0); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,0); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,0); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,0); break; /* second particle */ case 1: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,24); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,85); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,85); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,85); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,85); break; /* third particle */ case 2: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r, _mm512_int2mask(51),(__m512i)v_at,78); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,170); wt = 
(__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,170); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,170); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,170); break; /* fourth particle */ case 3: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,177); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,255); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,255); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,255); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,255); break; } _mm512_store_epi32(kk,v_nn); /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*amz; */ /* dy = amy*amz; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dx = dyp*amz; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*amz; */ /* dy = dx1*amz; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), 
&scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); _mm512_store_epi32(kk,v_ll); /* nn += 4*mxyv; */ /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*dzp; */ /* dy = amy*dzp; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*dzp; */ /* dy = dx1*dzp; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), &scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); } /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ /* dz = z + vz*dt; */ v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+3*nppmx+npoff] = -ux; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = 
_mm512_mask_blend_ps(msk,v_dx,v_x); v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+4*nppmx+npoff] = -uy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* ppart[j+5*nppmx+npoff] = -uz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); v_uz = _mm512_mask_sub_ps(v_uz,msk,v_zero,v_uz); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_uz); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+3*nppmx+npoff] = -ux; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+4*nppmx+npoff] = -uy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy); /* write output if test result is true for any particle */ if (msk) 
_mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; /* find inverse gamma */ ux = ppart[j+3*nppmx+npoff]; uy = ppart[j+4*nppmx+npoff]; uz = ppart[j+5*nppmx+npoff]; p2 = ux*ux + uy*uy + uz*uz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* calculate weights */ nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx = ux*gami; vy = uy*gami; vz = uz*gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*amz; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; dy = amy*dzp; nn += 4*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*dzp; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+3*nppmx+npoff] = -ux; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+4*nppmx+npoff] = -uy; } if ((dz < edgelz) || 
(dz >= edgerz)) { dz = z; ppart[j+5*nppmx+npoff] = -uz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+3*nppmx+npoff] = -ux; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+4*nppmx+npoff] = -uy; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(i+mxv*j+mxyv*k)]; cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(i+mxv*j+mxyv*k)]; cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+4*(i+mxv*j)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+4*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? 
my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+4*(i+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+4*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+4*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+4*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += 
scu[4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))]; } } } } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncpporder3lt(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int mx1, int my1, int mz1, int npbmx, int ntmax, int *irc) { /* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 3D linear memory algorithm has 3 steps. first, one finds particles leaving tile and stores their number in each direction, location, and destination in ncl and ihole. second, a prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. finally, we copy the incoming particles from other tiles into ppart. 
input: all except ppbuff, ncl, ihole, irc output: ppart, ppbuff, kpic, ncl, ihole, irc ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppbuff[m][i][n] = i co-ordinate of particle n in tile m kpic[m] = number of particles in tile m ncl[m][i] = number of particles going to destination i, tile m ihole[m][:][0] = location of hole in array left by departing particle ihole[m][:][1] = direction destination of particle leaving hole all for tile m ihole[m][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mz1 = (system length in z direction - 1)/mz + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart, ppbuff need to be 64 byte aligned nppmx, npbmx need to be a multiple of 16 local data */ int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff; int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll; int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dx, dy, dz; int ks[26]; __m512i v_ist, v_it, v_0, v_1, v_3, v_9; __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff; __m512 v_anx, v_any, v_anz; __m512 v_dx, v_dy, v_dz, v_x; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 v_zero; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int ls[32], lm[32]; mxy1 = mx1*my1; mxyz1 = mxy1*mz1; anx = (float) nx; any = (float) ny; anz = (float) nz; v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_anx = 
_mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_zero = _mm512_setzero_ps(); /* find and count particles leaving tiles and determine destination */ /* update ppart, ihole, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \ dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \ v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \ msk2,ls) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; ih = 0; nh = 0; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; noff = (ntmax+1)*l; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* dx = ppart[j+npoff]; */ /* dy = ppart[j+nppmx+npoff]; */ /* dz = ppart[j+2*nppmx+npoff]; */ v_dx = _mm512_load_ps(&ppart[j+npoff]); v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* find particles going out of bounds */ /* ist = 0; */ v_ist = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* ist = 2; */ /* } 
*/ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+npoff],v_x); } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* ist = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* ist = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+npoff],v_x); } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* ist += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = 
_mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+nppmx+npoff],v_x); } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* ist += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* ist += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+nppmx+npoff],v_x); } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* ist += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x); } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* ist += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* ist += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = 
_mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x); } } /* increment counters */ /* if (ist > 0) { */ /* ncl[ist+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = ist; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(ls,v_ist); for (i = 0; i < 16; i++) { ist = ls[i]; if (ist > 0) { ncl[ist+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+noff)] = j + i + 1; ihole[1+2*(ih+noff)] = ist; } else { nh = 1; } } } } /* loop over remaining particles in tile */ for (j = nps; j < npp; j++) { dx = ppart[j+npoff]; dy = ppart[j+nppmx+npoff]; dz = ppart[j+2*nppmx+npoff]; /* find particles going out of bounds */ ist = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) ppart[j+npoff] = dx - anx; ist = 2; } else if (dx < edgelx) { if (dx < 0.0) { dx += anx; if (dx < anx) ist = 1; else dx = 0.0; ppart[j+npoff] = dx; } else { ist = 1; } } if (dy >= edgery) { if (dy >= any) ppart[j+nppmx+npoff] = dy - any; ist += 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) ist += 3; else dy = 0.0; ppart[j+nppmx+npoff] = dy; } else { ist += 3; } } if (dz >= edgerz) { if (dz >= anz) ppart[j+2*nppmx+npoff] = dz - anz; ist += 18; } else if (dz < edgelz) { if (dz < 0.0) { dz += anz; if (dz < anz) ist += 9; else dz = 0.0; ppart[j+2*nppmx+npoff] = dz; } else { ist += 9; } } if (ist > 0) { ncl[ist+26*l-1] += 1; ih += 1; if (ih <= 
ntmax) { ihole[2*(ih+noff)] = j + 1; ihole[1+2*(ih+noff)] = ist; } else { nh = 1; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*noff] = ih; } /* ihole overflow */ if (*irc > 0) return; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ msk1 = _mm512_int2mask(1023); v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0); v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0); #pragma omp parallel for \ private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \ v_it0,v_ioff,ls,lm) for (l = 0; l < mxyz1; l++) { npoff = idimp*nppmx*l; nboff = idimp*npbmx*l; noff = (ntmax+1)*l; /* find address offset for ordered ppbuff array */ /* isum = 0; */ /* for (j = 0; j < 26; j++) { */ /* ist = ncl[j+26*l]; */ /* ncl[j+26*l] = isum; */ /* isum += ist; */ /* } */ /* perform exclusive prefix scan */ /* load 26 data elements into 32 length vector with zero padding */ mm = 26*l; v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]); _mm512_store_epi32(ls,v_it); v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]); v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]); _mm512_store_epi32(&ls[16],v_is); v_ioff = _mm512_setzero_epi32(); /* vector loop over elements in blocks of 16 */ for (j = 0; j < 32; j+=16) { /* load data */ v_it0 = _mm512_load_epi32(&ls[j]); /* first pass */ v_is = _mm512_shuffle_epi32(v_it0,177); v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690), v_it0,v_is); /* second pass */ v_is = _mm512_shuffle_epi32(v_it,80); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it, v_is); /* third pass */ v_is = _mm512_permutevar_epi32(v_m1,v_it); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it, v_is); /* fourth pass */ v_is = _mm512_permutevar_epi32(v_m2,v_it); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it, v_is); /* add offset */ v_it = _mm512_add_epi32(v_it,v_ioff); /* next 
offset */ if (j==0) { v_ioff = _mm512_shuffle_epi32(v_it,255); v_ioff = _mm512_permute4f128_epi32(v_ioff,255); } /* subtract for exclusive scan */ v_it = _mm512_sub_epi32(v_it,v_it0); /* write data */ _mm512_store_epi32(&ls[j],v_it); } nh = ihole[2*noff]; nps = 16*(nh/16); /* nps = (nh >> 4) << 4; */ ip = 0; /* loop over particles leaving tile in groups of 16 */ for (j = 0; j < nps; j+=16) { /* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */ /* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */ mm = 2*(j+1+noff); v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]); _mm512_store_epi32(lm,v_it); mm += 16; v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]); _mm512_store_epi32(&lm[16],v_is); /* buffer particles that are leaving tile, in direction order */ for (ll = 0; ll < 16; ll++) { j1 = lm[2*ll] - 1; ist = lm[1+2*ll]; ii = ls[ist-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ls[ist-1] = ii + 1; } } /* loop over remaining particles leaving tile */ for (j = nps; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+noff)] - 1; ist = ihole[1+2*(j+1+noff)]; ii = ls[ist-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ls[ist-1] = ii + 1; } /* store 26 data elements into ncl */ mm = 26*l; v_it = _mm512_load_epi32(ls); v_is = _mm512_load_epi32(&ls[16]); _mm512_packstorelo_epi32(&ncl[mm],v_it); _mm512_packstorehi_epi32(&ncl[mm+16],v_it); _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is); _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is); /* set error */ if (ip > 0) *irc = ncl[25+26*l]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0); 
v_m1 = _mm512_set1_epi32(nppmx); #pragma omp parallel for \ private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \ lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \ v_npp,v_x,msk1,ks,ls) for (l = 0; l < mxyz1; l++) { npp = kpic[l]; npoff = idimp*nppmx*l; noff = (ntmax+1)*l; v_m2 = _mm512_set1_epi32(noff+1); v_m3 = _mm512_set1_epi32(npoff); kz = l/mxy1; k = l - mxy1*kz; /* loop over tiles in z, assume periodic boundary conditions */ lk = kz*mxy1; /* find tile behind */ ll = kz - 1; if (ll < 0) ll += mz1; ll = ll*mxy1; /* find tile in front */ lr = kz + 1; if (lr >= mz1) lr -= mz1; lr = lr*mxy1; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1 ; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk + lk; ks[1] = kxl + kk + lk; ks[2] = kx + kr + lk; ks[3] = kxr + kr + lk; ks[4] = kxl + kr + lk; ks[5] = kx + kl + lk; ks[6] = kxr + kl + lk; ks[7] = kxl + kl + lk; ks[8] = kx + kk + lr; ks[9] = kxr + kk + lr; ks[10] = kxl + kk + lr; ks[11] = kx + kr + lr; ks[12] = kxr + kr + lr; ks[13] = kxl + kr + lr; ks[14] = kx + kl + lr; ks[15] = kxr + kl + lr; ks[16] = kxl + kl + lr; ks[17] = kx + kk + ll; ks[18] = kxr + kk + ll; ks[19] = kxl + kk + ll; ks[20] = kx + kr + ll; ks[21] = kxr + kr + ll; ks[22] = kxl + kr + ll; ks[23] = kx + kl + ll; ks[24] = kxr + kl + ll; ks[25] = kxl + kl + ll; /* loop over directions */ nh = ihole[2*noff]; ncoff = 0; ih = 0; ist = 0; j1 = 0; v_it0 = _mm512_set1_epi32(nh); v_is = _mm512_add_epi32(v_m2,v_it0); v_it0 = _mm512_sub_epi32(v_ioff,v_it0); v_npp = _mm512_set1_epi32(npp); for (ii = 0; ii < 26; ii++) { nboff = idimp*npbmx*ks[ii]; if (ii > 0) ncoff = 
ncl[ii-1+26*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+26*ks[ii]] - ncoff; /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in this direction in groups of 16 */ for (j = 0; j < nps; j+=16) { /* insert incoming particles into holes */ /* ih += 1; */ /* if (ih <= nh) { */ /* j1 = ihole[2*(ih+noff)] - 1; */ /* } */ /* place overflow at end of array */ /* else { */ /* j1 = npp; */ /* npp += 1; */ /* } */ v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0); msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm); v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm, v_npp); v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm, (int *)ihole,4); v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1); ih += 16; nn = ih - nh; if (nn > 0) { nn = nn < 16 ? nn : 16; npp += nn; } msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT); ll = _mm512_mask2int(_mm512_knot(msk1)); v_it = _mm512_add_epi32(v_it,v_m3); for (i = 0; i < idimp; i++) { /* if (j1 < nppmx) */ /* ppart[j1+nppmx*i+npoff] */ /* = ppbuff[j+ncoff+npbmx*i+nboff]; */ mm = j + ncoff + npbmx*i + nboff; v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]); v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]); if (ll==0) { _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4); } else { _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it, v_x,4); } v_it = _mm512_add_epi32(v_it,v_m1); } if (ll != 0) { ist = 1; } } /* loop over remaining particles in this direction */ for (j = nps; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*l)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[j1+nppmx*i+npoff] = ppbuff[j+ncoff+npbmx*i+nboff]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles 
from bottom */ /* holes with locations great than npp-ip do not need to be filled */ if (ih < nh) { ip = nh - ih; ii = nh; nn = ihole[2*(ii+noff)] - 1; v_it0 = _mm512_set1_epi32(nn); ih += 1; j2 = ihole[2*(ih+noff)] - 1; v_m2 = _mm512_sub_epi32(v_m2,v_1); /* move particles from end into remaining holes */ /* holes are processed in increasing order */ /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in groups of 16 */ for (j = 0; j < nps; j+=16) { /* j2 = ihole[2*(ih+noff)] - 1; */ v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff); v_mm = _mm512_add_epi32(v_mm,v_m2); v_mm = _mm512_add_epi32(v_mm,v_mm); v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4); v_is = _mm512_sub_epi32(v_is,v_1); /* j1 = npp - j - 1; */ /* if (j1==nn) { */ /* ii -= 1; */ /* nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */ /* } */ kk = 0; for (ll = 0; ll < 16; ll++) { j1 = npp - j - ll - 1; if (j1==nn) { ii -= 1; nn = ihole[2*(ii+(ntmax+1)*l)] - 1; } else { ls[kk] = j1; kk += 1; } } v_it = _mm512_load_epi32(ls); v_it0 = _mm512_set1_epi32(kk); msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT); v_is = _mm512_add_epi32(v_is,v_m3); v_it = _mm512_add_epi32(v_it,v_m3); for (i = 0; i < idimp; i++) { /* ppart[j2+nppmx*i+npoff] */ /* = ppart[j1+nppmx*i+npoff]; */ if (kk==16) { v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4); _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4); } else { v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it, (float *)ppart,4); _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is, v_x,4); } v_is = _mm512_add_epi32(v_is,v_m1); v_it = _mm512_add_epi32(v_it,v_m1); } ih += kk; /* holes with locations great than npp-ip do not need to be filled */ } /* loop over remaining particles */ if (nps < ip) { nn = ihole[2*(ii+noff)] - 1; j2 = ihole[2*(ih+noff)] - 1; } for (j = nps; j < ip; j++) { j1 = npp - j - 1; if (j1==nn) { ii -= 1; nn = ihole[2*(ii+noff)] - 1; } else { for (i = 0; i < idimp; i++) { ppart[j2+nppmx*i+npoff] = ppart[j1+nppmx*i+npoff]; } ih += 1; j2 = 
ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
/* update the final particle count for this tile */
      kpic[l] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncpporderf3lt(float ppart[], float ppbuff[], int kpic[],
                     int ncl[], int ihole[], int idimp, int nppmx,
                     int mx1, int my1, int mz1, int npbmx, int ntmax,
                     int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   the algorithm has 2 steps.  first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   ckncgppushf3lt subroutine.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppbuff[m][i][n] = i co-ordinate of particle n in tile m
   kpic[m] = number of particles in tile m
   ncl[m][i] = number of particles going to destination i, tile m
   ihole[m][:][0] = location of hole in array left by departing particle
   ihole[m][:][1] = direction destination of particle leaving hole
   all for tile m
   ihole[m][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 6
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mz1 = (system length in z direction - 1)/mz + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart, ppbuff need to be 64 byte aligned
   nppmx, npbmx need to be a multiple of 16
local data */
   int mxy1, mxyz1, noff, npp, npoff, nps, nboff, ncoff;
   int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
   int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
   int ks[26];
   __m512i v_it, v_0, v_1;
   __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
   __m512 v_x, v_zero;
   __mmask16 msk1;
/* ls, lm = 64-byte aligned scratch arrays for vector loads/stores */
   __attribute__((aligned(64))) unsigned int ls[32], lm[32];
   mxy1 = mx1*my1;
   mxyz1 = mxy1*mz1;
   v_0 = _mm512_set1_epi32(0);
   v_1 = _mm512_set1_epi32(1);
   v_zero = _mm512_setzero_ps();
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
/* msk1 selects the low 10 lanes (26 = 16 + 10 packed counters) */
   msk1 = _mm512_int2mask(1023);
/* permutation tables for the third and fourth passes of the scan */
   v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
   v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
   for (l = 0; l < mxyz1; l++) {
      npoff = idimp*nppmx*l;
      nboff = idimp*npbmx*l;
      noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0;                  */
/* for (j = 0; j < 26; j++) { */
/*    ist = ncl[j+26*l];      */
/*    ncl[j+26*l] = isum;     */
/*    isum += ist;            */
/* }                          */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
      mm = 26*l;
      v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
      v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
      _mm512_store_epi32(ls,v_it);
      v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
      v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
      _mm512_store_epi32(&ls[16],v_is);
      v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
/* four log2(16) passes build an inclusive scan within each block */
      for (j = 0; j < 32; j+=16) {
/* load data */
         v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
         v_is = _mm512_shuffle_epi32(v_it0,177);
         v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
                v_it0,v_is);
/* second pass */
         v_is = _mm512_shuffle_epi32(v_it,80);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
                v_is);
/* third pass */
         v_is = _mm512_permutevar_epi32(v_m1,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
                v_is);
/* fourth
   pass */
         v_is = _mm512_permutevar_epi32(v_m2,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
                v_is);
/* add offset */
         v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset: broadcast the running total of the first block */
         if (j==0) {
            v_ioff = _mm512_shuffle_epi32(v_it,255);
            v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
         }
/* subtract for exclusive scan */
         v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
         _mm512_store_epi32(&ls[j],v_it);
      }
      nh = ihole[2*noff];
      nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
      ip = 0;
/* loop over particles leaving tile in groups of 16 */
      for (j = 0; j < nps; j+=16) {
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1;  */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)];   */
         mm = 2*(j+1+noff);
         v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
         _mm512_store_epi32(lm,v_it);
         mm += 16;
         v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
         _mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
         for (ll = 0; ll < 16; ll++) {
            j1 = lm[2*ll] - 1;
            ist = lm[1+2*ll];
            ii = ls[ist-1];
            if (ii < npbmx) {
               for (i = 0; i < idimp; i++) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
            }
            else {
               ip = 1;
            }
            ls[ist-1] = ii + 1;
         }
      }
/* loop over remaining particles leaving tile */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+noff)] - 1;
         ist = ihole[1+2*(j+1+noff)];
         ii = ls[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ls[ist-1] = ii + 1;
      }
/* store 26 data elements into ncl */
      mm = 26*l;
      v_it = _mm512_load_epi32(ls);
      v_is = _mm512_load_epi32(&ls[16]);
      _mm512_packstorelo_epi32(&ncl[mm],v_it);
      _mm512_packstorehi_epi32(&ncl[mm+16],v_it);
      _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
      _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
      if (ip > 0)
         *irc = ncl[25+26*l];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
   v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
   v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
   for (l = 0; l < mxyz1; l++) {
      npp = kpic[l];
      npoff = idimp*nppmx*l;
      noff = (ntmax+1)*l;
      v_m2 = _mm512_set1_epi32(noff+1);
      v_m3 = _mm512_set1_epi32(npoff);
      kz = l/mxy1;
      k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
      lk = kz*mxy1;
/* find tile behind */
      ll = kz - 1;
      if (ll < 0)
         ll += mz1;
      ll = ll*mxy1;
/* find tile in front */
      lr = kz + 1;
      if (lr >= mz1)
         lr -= mz1;
      lr = lr*mxy1;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1 ;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk + lk;
      ks[1] = kxl + kk + lk;
      ks[2] = kx + kr + lk;
      ks[3] = kxr + kr + lk;
      ks[4] = kxl + kr + lk;
      ks[5] = kx + kl + lk;
      ks[6] = kxr + kl + lk;
      ks[7] = kxl + kl + lk;
      ks[8] = kx + kk + lr;
      ks[9] = kxr + kk + lr;
      ks[10] = kxl + kk + lr;
      ks[11] = kx + kr + lr;
      ks[12] = kxr + kr + lr;
      ks[13] = kxl + kr + lr;
      ks[14] = kx + kl + lr;
      ks[15] = kxr + kl + lr;
      ks[16] = kxl + kl + lr;
      ks[17] = kx + kk + ll;
      ks[18] = kxr + kk + ll;
      ks[19] = kxl + kk + ll;
      ks[20] = kx + kr + ll;
      ks[21] = kxr + kr + ll;
      ks[22] = kxl + kr + ll;
      ks[23] = kx + kl + ll;
      ks[24] = kxr + kl + ll;
      ks[25] = kxl + kl + ll;
/* loop over directions */
      nh = ihole[2*noff];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      v_it0 = _mm512_set1_epi32(nh);
      v_is =
      _mm512_add_epi32(v_m2,v_it0);
      v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
      v_npp = _mm512_set1_epi32(npp);
      for (ii = 0; ii < 26; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1;                             */
/* if (ih <= nh) {                      */
/*    j1 = ihole[2*(ih+noff)] - 1;      */
/* }                                    */
/* place overflow at end of array */
/* else {                          */
/*    j1 = npp;                    */
/*    npp += 1;                    */
/* }                               */
/* lanes with ih <= nh gather hole locations from ihole, */
/* remaining lanes append at the end of the array (npp)   */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
            msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
            v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
                   v_npp);
            v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
                   (int *)ihole,4);
            v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
            ih += 16;
            nn = ih - nh;
            if (nn > 0) {
               nn = nn < 16 ?
               nn : 16;
               npp += nn;
            }
            msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
            ll = _mm512_mask2int(_mm512_knot(msk1));
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx)                         */
/*    ppart[j1+nppmx*i+npoff]              */
/*    = ppbuff[j+ncoff+npbmx*i+nboff];     */
               mm = j + ncoff + npbmx*i + nboff;
               v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
               v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
               if (ll==0) {
                  _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
               }
               else {
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
                  v_x,4);
               }
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            if (ll != 0) {
               ist = 1;
            }
         }
/* loop over remaining particles in this direction */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+noff)] - 1;
         v_it0 = _mm512_set1_epi32(nn);
         ih += 1;
         j2 = ihole[2*(ih+noff)] - 1;
         v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
            v_mm = _mm512_add_epi32(v_mm,v_m2);
            v_mm = _mm512_add_epi32(v_mm,v_mm);
            v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
            v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1;                         */
/* if (j1==nn) {                             */
/*    ii -= 1;                               */
/*    nn = ihole[2*(ii+(ntmax+1)*l)] - 1;    */
/* }                                         */
/* collect source indices, skipping entries that are themselves holes */
            kk = 0;
            for (ll = 0; ll < 16; ll++) {
               j1 = npp - j - ll - 1;
               if (j1==nn) {
                  ii
-= 1;
                  nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
               }
               else {
                  ls[kk] = j1;
                  kk += 1;
               }
            }
            v_it = _mm512_load_epi32(ls);
            v_it0 = _mm512_set1_epi32(kk);
            msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
            v_is = _mm512_add_epi32(v_is,v_m3);
            v_it = _mm512_add_epi32(v_it,v_m3);
/* gather particles from the end of the array, scatter into holes */
            for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff]       */
/*    = ppart[j1+nppmx*i+npoff]; */
               if (kk==16) {
                  v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
                  _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
               }
               else {
                  v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
                        (float *)ppart,4);
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
                  v_x,4);
               }
               v_is = _mm512_add_epi32(v_is,v_m1);
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            ih += kk;
/* holes with locations greater than npp-ip do not need to be filled */
         }
/* loop over remaining particles */
         if (nps < ip) {
            nn = ihole[2*(ii+noff)] - 1;
            j2 = ihole[2*(ih+noff)] - 1;
         }
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+noff)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
/* update the final particle count for this tile */
      kpic[l] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cknccguard3l(float fxyz[], int nx, int ny, int nz, int nxe,
                  int nye, int nze) {
/* replicate extended periodic vector field fxyz
   linear interpolation
   nx/ny/nz = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC, fxyz needs to be 64 byte aligned
   nxe needs to be a multiple of 4
local data */
#define N 4
   int j, k, l, nxs, nxyen, ll;
   nxs = 4*(nx/4);
   nxyen = N*nxe*nye;
/* copy edges of extended field */
#pragma omp parallel
   {
#pragma omp for nowait \
private(j,k,l,ll)
      for (l = 0; l < nz; l++) {
         ll = nxyen*l;
/* replicate the x guard cell (j = nx copies j = 0) for each y row */
         for (k = 0; k < ny; k++) {
            fxyz[N*nx+N*nxe*k+ll] = fxyz[N*nxe*k+ll];
            fxyz[1+N*nx+N*nxe*k+ll] = fxyz[1+N*nxe*k+ll];
            fxyz[2+N*nx+N*nxe*k+ll] = fxyz[2+N*nxe*k+ll];
         }
/* vector loop over elements in blocks of 4 */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+ll],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+ll]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[N*j+N*nxe*ny+ll] = fxyz[N*j+ll];
            fxyz[1+N*j+N*nxe*ny+ll] = fxyz[1+N*j+ll];
            fxyz[2+N*j+N*nxe*ny+ll] = fxyz[2+N*j+ll];
         }
         fxyz[N*nx+N*nxe*ny+ll] = fxyz[ll];
         fxyz[1+N*nx+N*nxe*ny+ll] = fxyz[1+ll];
         fxyz[2+N*nx+N*nxe*ny+ll] = fxyz[2+ll];
      }
#pragma omp for \
private(j,k)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[N*j+N*nxe*k+nxyen*nz],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+N*nxe*k]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[N*j+N*nxe*k+nxyen*nz] = fxyz[N*j+N*nxe*k];
            fxyz[1+N*j+N*nxe*k+nxyen*nz] = fxyz[1+N*j+N*nxe*k];
            fxyz[2+N*j+N*nxe*k+nxyen*nz] = fxyz[2+N*j+N*nxe*k];
         }
         fxyz[N*nx+N*nxe*k+nxyen*nz] = fxyz[N*nxe*k];
         fxyz[1+N*nx+N*nxe*k+nxyen*nz] = fxyz[1+N*nxe*k];
         fxyz[2+N*nx+N*nxe*k+nxyen*nz] = fxyz[2+N*nxe*k];
      }
   }
/* vector loop over elements in blocks of 4 */
   for (j = 0; j < nxs; j+=4) {
      _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+nxyen*nz],
      _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j]));
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      fxyz[N*j+N*nxe*ny+nxyen*nz] = fxyz[N*j];
      fxyz[1+N*j+N*nxe*ny+nxyen*nz] = fxyz[1+N*j];
      fxyz[2+N*j+N*nxe*ny+nxyen*nz] = fxyz[2+N*j];
   }
   fxyz[N*nx+N*nxe*ny+nxyen*nz] = fxyz[0];
   fxyz[1+N*nx+N*nxe*ny+nxyen*nz] = fxyz[1];
   fxyz[2+N*nx+N*nxe*ny+nxyen*nz] = fxyz[2];
   return;
#undef N
}

/*--------------------------------------------------------------------*/
void ckncacguard3l(float cu[], int nx, int ny, int nz, int nxe,
                   int nye, int nze) {
/* accumulate extended periodic field cu
   linear interpolation
   nx/ny/nz = system length in x/y direction
   nxe = first dimension of field
   arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC, fxyz needs to be 64 byte aligned
   nxe needs to be a multiple of 4
local data */
   int j, k, l, nxs, nxyen, ll;
   __m512 v_cu, v_zero;
   nxs = 4*(nx/4);
   nxyen = 4*nxe*nye;
   v_zero = _mm512_set1_ps(0.0f);
/* accumulate edges of extended field */
   for (l = 0; l < nz; l++) {
      ll = nxyen*l;
/* fold the x guard cell (j = nx) back into j = 0, then clear it */
      for (k = 0; k < ny; k++) {
         cu[4*nxe*k+ll] += cu[4*nx+4*nxe*k+ll];
         cu[1+4*nxe*k+ll] += cu[1+4*nx+4*nxe*k+ll];
         cu[2+4*nxe*k+ll] += cu[2+4*nx+4*nxe*k+ll];
         cu[4*nx+4*nxe*k+ll] = 0.0;
         cu[1+4*nx+4*nxe*k+ll] = 0.0;
         cu[2+4*nx+4*nxe*k+ll] = 0.0;
      }
/* vector loop over elements in blocks of 4 */
      for (j = 0; j < nxs; j+=4) {
         v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+ll]);
         v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+ll]),v_cu);
         _mm512_store_ps(&cu[4*j+ll],v_cu);
         _mm512_store_ps(&cu[4*j+4*nxe*ny+ll],v_zero);
      }
/* loop over remaining elements */
      for (j = nxs; j < nx; j++) {
         cu[4*j+ll] += cu[4*j+4*nxe*ny+ll];
         cu[1+4*j+ll] += cu[1+4*j+4*nxe*ny+ll];
         cu[2+4*j+ll] += cu[2+4*j+4*nxe*ny+ll];
         cu[4*j+4*nxe*ny+ll] = 0.0;
         cu[1+4*j+4*nxe*ny+ll] = 0.0;
         cu[2+4*j+4*nxe*ny+ll] = 0.0;
      }
      cu[ll] += cu[4*nx+4*nxe*ny+ll];
      cu[1+ll] += cu[1+4*nx+4*nxe*ny+ll];
      cu[2+ll] += cu[2+4*nx+4*nxe*ny+ll];
      cu[4*nx+4*nxe*ny+ll] = 0.0;
      cu[1+4*nx+4*nxe*ny+ll] = 0.0;
      cu[2+4*nx+4*nxe*ny+ll] = 0.0;
   }
   for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
      for (j = 0; j < nxs; j+=4) {
         v_cu = _mm512_load_ps(&cu[4*j+4*nxe*k+nxyen*nz]);
         v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+4*nxe*k]),v_cu);
         _mm512_store_ps(&cu[4*j+4*nxe*k],v_cu);
         _mm512_store_ps(&cu[4*j+4*nxe*k+nxyen*nz],v_zero);
      }
/* loop over remaining elements */
      for (j = nxs; j < nx; j++) {
         cu[4*j+4*nxe*k] += cu[4*j+4*nxe*k+nxyen*nz];
         cu[1+4*j+4*nxe*k] += cu[1+4*j+4*nxe*k+nxyen*nz];
         cu[2+4*j+4*nxe*k] += cu[2+4*j+4*nxe*k+nxyen*nz];
         cu[4*j+4*nxe*k+nxyen*nz] = 0.0;
         cu[1+4*j+4*nxe*k+nxyen*nz] = 0.0;
         cu[2+4*j+4*nxe*k+nxyen*nz] = 0.0;
      }
      cu[4*nxe*k] += cu[4*nx+4*nxe*k+nxyen*nz];
      cu[1+4*nxe*k] += cu[1+4*nx+4*nxe*k+nxyen*nz];
      cu[2+4*nxe*k] += cu[2+4*nx+4*nxe*k+nxyen*nz];
      cu[4*nx+4*nxe*k+nxyen*nz] = 0.0;
      cu[1+4*nx+4*nxe*k+nxyen*nz] = 0.0;
      cu[2+4*nx+4*nxe*k+nxyen*nz] = 0.0;
   }
/* vector loop over elements in blocks of 4 */
   for (j = 0; j < nxs; j+=4) {
      v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+nxyen*nz]);
      v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j]),v_cu);
      _mm512_store_ps(&cu[4*j],v_cu);
      _mm512_store_ps(&cu[4*j+4*nxe*ny+nxyen*nz],v_zero);
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      cu[4*j] += cu[4*j+4*nxe*ny+nxyen*nz];
      cu[1+4*j] += cu[1+4*j+4*nxe*ny+nxyen*nz];
      cu[2+4*j] += cu[2+4*j+4*nxe*ny+nxyen*nz];
      cu[4*j+4*nxe*ny+nxyen*nz] = 0.0;
      cu[1+4*j+4*nxe*ny+nxyen*nz] = 0.0;
      cu[2+4*j+4*nxe*ny+nxyen*nz] = 0.0;
   }
/* fold the far corner guard cell into the origin and clear it */
   cu[0] += cu[4*nx+4*nxe*ny+nxyen*nz];
   cu[1] += cu[1+4*nx+4*nxe*ny+nxyen*nz];
   cu[2] += cu[2+4*nx+4*nxe*ny+nxyen*nz];
   cu[4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   cu[1+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   cu[2+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   return;
}

/*--------------------------------------------------------------------*/
void ckncaguard3l(float q[], int nx, int ny, int nz, int nxe,
                  int nye, int nze) {
/* accumulate extended periodic scalar field q
   linear interpolation
   nx/ny/nz = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC, q needs to be 64 byte aligned
   nxe needs to be a multiple of 16
local data */
   int j, k, l, nxs, nxye, ll;
   __m512 v_q;
   nxs = 16*(nx/16);
   nxye = nxe*nye;
/* accumulate edges of extended field */
#pragma omp parallel
   {
#pragma omp for \
private(j,k,l,ll,v_q)
      for (l = 0; l < nz; l++) {
         ll = nxye*l;
/* fold the x guard cell (j = nx) back into j = 0, then clear it */
         for (k = 0; k < ny; k++) {
            q[nxe*k+ll] += q[nx+nxe*k+ll];
            q[nx+nxe*k+ll] = 0.0;
         }
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*ny+ll]);
            v_q =
            _mm512_add_ps(_mm512_load_ps(&q[j+ll]),v_q);
            _mm512_store_ps(&q[j+ll],v_q);
            _mm512_store_ps(&q[j+nxe*ny+ll],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+ll] += q[j+nxe*ny+ll];
            q[j+nxe*ny+ll] = 0.0;
         }
         q[ll] += q[nx+nxe*ny+ll];
         q[nx+nxe*ny+ll] = 0.0;
      }
#pragma omp for \
private(j,k,v_q)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*k+nxye*nz]);
            v_q = _mm512_add_ps(_mm512_load_ps(&q[j+nxe*k]),v_q);
            _mm512_store_ps(&q[j+nxe*k],v_q);
            _mm512_store_ps(&q[j+nxe*k+nxye*nz],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+nxe*k] += q[j+nxe*k+nxye*nz];
            q[j+nxe*k+nxye*nz] = 0.0;
         }
         q[nxe*k] += q[nx+nxe*k+nxye*nz];
         q[nx+nxe*k+nxye*nz] = 0.0;
      }
   }
/* vector loop over elements in blocks of 16 */
   for (j = 0; j < nxs; j+=16) {
      v_q = _mm512_load_ps(&q[j+nxe*ny+nxye*nz]);
      v_q = _mm512_add_ps(_mm512_load_ps(&q[j]),v_q);
      _mm512_store_ps(&q[j],v_q);
      _mm512_store_ps(&q[j+nxe*ny+nxye*nz],_mm512_setzero_ps());
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      q[j] += q[j+nxe*ny+nxye*nz];
      q[j+nxe*ny+nxye*nz] = 0.0;
   }
/* fold the far corner guard cell into the origin and clear it */
   q[0] += q[nx+nxe*ny+nxye*nz];
   q[nx+nxe*ny+nxye*nz] = 0.0;
   return;
}

/*--------------------------------------------------------------------*/
void ckncmpois33(float complex q[], float complex fxyz[], int isign,
                 float complex ffc[], float ax, float ay, float az,
                 float affp, float *we, int nx, int ny, int nz,
                 int nxvh, int nyv, int nzv, int nxhd, int nyhd,
                 int nzhd) {
/* this subroutine solves 3d poisson's equation in fourier space for
   force/charge (or convolution of electric field over particle shape)
   with periodic boundary conditions.
for isign = 0, output: ffc input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd for isign = -1, output: fxyz, we input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd approximate flop count is: 59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc) where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated equation used is: fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx], fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx], fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx], where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx], s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0, fx(ky=pi) = fy(ky=pi) = fx(ky=pi) = 0, fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0, fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0. q[l][k][j] = complex charge density for fourier mode (j,k,l) fxyz[l][k][j][0] = x component of complex force/charge fxyz[l][k][j][1] = y component of complex force/charge fxyz[l][k][j][2] = z component of complex force/charge all for fourier mode (j,k,l) cimag(ffc[l][k][j]) = finite-size particle shape factor s for fourier mode (j,k,l) creal(ffc[l][k][j]) = potential green's function g for fourier mode (j,k,l) ax/ay/az = half-width of particle in x/y/z direction affp = normalization constant = nx*ny*nz/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))* |q[kz][ky][kx]*s[kz][ky][kx]|**2) nx/ny/nz = system length in x/y/z direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nzv = third dimension of field arrays, must be >= nz nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh 
nzhd = third dimension of form factor array, must be >= nzh requires KNC, q, fxy, ffc need to be 64 byte aligned nxhd, nxvh need to be a multiple of 8 fxyz needs to have 4 components local data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6; float complex zero, zt1, zt2; double wp, sum1, sum2; __m512i v_j, v_it, v_perm; __m512 v_dnx, v_dny, v_dnz, v_dky, v_dkz, v_at1, v_at2, v_at3, v_at4; __m512 v_zero, v_zt1, v_zt2, v_zt3, v_zt4; __m512 a, b, c, d, e, f, g, h; __m512d v_wp, v_d; __attribute__((aligned(64))) double dd[8]; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 8*(nxh/8); itn = 1 > nxhs ? 1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0); v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_perm = _mm512_set_epi32(15,14,11,10,7,6,3,2,13,12,9,8,5,4,1,0); if (isign != 0) goto L40; /* prepare form factor array */ for (l = 0; l < nzh; l++) { dkz = dnz*(float) l; ll = nxyhd*l; at1 = dkz*dkz; at2 = pow((dkz*az),2); for (k = 0; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; at3 = dky*dky + at1; at4 = pow((dky*ay),2) + at2; for (j = 0; j < nxh; j++) { dkx = dnx*(float) j; at5 = dkx*dkx + at3; at6 = exp(-0.5*(pow((dkx*ax),2) + at4)); if (at5==0.0) { ffc[j+kk+ll] = affp + 1.0*_Complex_I; } else { ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I; } } } } return; /* calculate force/charge and sum field energy */ L40: sum1 = 0.0; /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp, \ v_it,v_dky,v_dkz,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,a,b, \ 
c,d,e,f,g,h,v_d,v_wp,dd) \ reduction(+:sum1) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps( _mm512_set1_epi32(l),_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; wp = 0.0; v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); */ v_at1 = _mm512_load_ps((float *)&ffc[j+kk+ll]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* at3 = dky*at1; */ v_at3 = _mm512_mul_ps(v_dky,v_at1); /* at4 = dkz*at1; */ v_at4 = _mm512_mul_ps(v_dkz,v_at1); /* zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+kj+lj]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+k1+lj]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+kj+lj)] = at2*zt1; */ /* fxyz[1+4*(j+kj+lj)] = at3*zt1; */ /* fxyz[2+4*(j+kj+lj)] = at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = _mm512_mul_ps(v_at3,v_zt1); 
c = _mm512_mul_ps(v_at4,v_zt1); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+kj+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+kj+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+kj+lj)],d); /* fxyz[4*(j+k1+lj)] = at2*zt2; */ /* fxyz[1+4*(j+k1+lj)] = -at3*zt2; */ /* fxyz[2+4*(j+k1+lj)] = at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2)); c = _mm512_mul_ps(v_at4,v_zt2); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],d); /* wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) */ /* + q[j+k1+lj]*conjf(q[j+k1+lj])); */ v_zt3 = _mm512_mul_ps(v_zt1,v_zt1); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_mul_ps(v_at1,v_zt3); /* zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+kj+l1]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+k1+l1]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+kj+l1)] = at2*zt1; */ /* fxyz[1+4*(j+kj+l1)] = at3*zt1; */ /* fxyz[2+4*(j+kj+l1)] = -at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = _mm512_mul_ps(v_at3,v_zt1); c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt1)); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],d); /* fxyz[4*(j+k1+l1)] = at2*zt2; */ /* fxyz[1+4*(j+k1+l1)] = -at3*zt2; */ /* fxyz[2+4*(j+k1+l1)] = -at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2)); c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2)); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],d); /* wp += at1*(q[j+kj+l1]*conjf(q[j+kj+l1]) */ /* + q[j+k1+l1]*conjf(q[j+k1+l1])); */ v_zt4 = _mm512_mul_ps(v_zt1,v_zt1); v_zt4 = _mm512_add_ps(v_zt4,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_at1,v_zt4)); /* convert to double precision before accumulating */ v_wp = 
_mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); at2 = at1*dnx*(float) j; at3 = dky*at1; at4 = dkz*at1; zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; fxyz[4*(j+kj+lj)] = at2*zt1; fxyz[1+4*(j+kj+lj)] = at3*zt1; fxyz[2+4*(j+kj+lj)] = at4*zt1; fxyz[4*(j+k1+lj)] = at2*zt2; fxyz[1+4*(j+k1+lj)] = -at3*zt2; fxyz[2+4*(j+k1+lj)] = at4*zt2; zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; fxyz[4*(j+kj+l1)] = at2*zt1; fxyz[1+4*(j+kj+l1)] = at3*zt1; fxyz[2+4*(j+kj+l1)] = -at4*zt1; fxyz[4*(j+k1+l1)] = at2*zt2; fxyz[1+4*(j+k1+l1)] = -at3*zt2; fxyz[2+4*(j+k1+l1)] = -at4*zt2; at1 = at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) + q[j+k1+lj]*conjf(q[j+k1+lj]) + q[j+kj+l1]*conjf(q[j+kj+l1]) + q[j+k1+l1]*conjf(q[j+k1+l1])); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]); at3 = at1*dny*(float) k; at4 = dkz*at1; zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I; zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I; fxyz[4*(kj+lj)] = zero; fxyz[1+4*(kj+lj)] = at3*zt1; fxyz[2+4*(kj+lj)] = at4*zt1; fxyz[4*(k1+lj)] = zero; fxyz[1+4*(k1+lj)] = zero; fxyz[2+4*(k1+lj)] = zero; fxyz[4*(kj+l1)] = zero; fxyz[1+4*(kj+l1)] = at3*zt2; fxyz[2+4*(kj+l1)] = -at4*zt2; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[kj+lj]*conjf(q[kj+lj]) + q[kj+l1]*conjf(q[kj+l1])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); */ v_at1 = _mm512_load_ps((float *)&ffc[j+ll]); v_at2 = 
(__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* at4 = dkz*at1; */ v_at4 = _mm512_mul_ps(v_dkz,v_at1); /* zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+lj]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+l1]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+lj)] = at2*zt1; */ /* fxyz[1+4*(j+lj)] = zero; */ /* fxyz[2+4*(j+lj)] = at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = v_zero; c = _mm512_mul_ps(v_at4,v_zt1); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c, 78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a, 78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255), b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float 
*)&fxyz[4*(j+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+lj)],d); /* fxyz[4*(j+k1+lj)] = zero; */ /* fxyz[1+4*(j+k1+lj)] = zero; */ /* fxyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],v_zero); /* fxyz[4*(j+l1)] = at2*zt2; */ /* fxyz[1+4*(j+l1)] = zero; */ /* fxyz[2+4*(j+l1)] = -at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = v_zero; c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2)); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c, 78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a, 78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255), b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+l1)],d); /* fxyz[4*(j+k1+l1)] = zero; */ /* fxyz[1+4*(j+k1+l1)] = zero; */ /* fxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero); /* 
wp += at1*(q[j+lj]*conjf(q[j+lj]) */ /* + q[j+l1]*conjf(q[j+l1])); */ v_zt3 = _mm512_mul_ps(v_zt1,v_zt1); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_mul_ps(v_at1,v_zt3); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); at2 = at1*dnx*(float) j; at4 = dkz*at1; zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; fxyz[4*(j+lj)] = at2*zt1; fxyz[1+4*(j+lj)] = zero; fxyz[2+4*(j+lj)] = at4*zt1; fxyz[4*(j+k1+lj)] = zero; fxyz[1+4*(j+k1+lj)] = zero; fxyz[2+4*(j+k1+lj)] = zero; fxyz[4*(j+l1)] = at2*zt2; fxyz[1+4*(j+l1)] = zero; fxyz[2+4*(j+l1)] = -at4*zt2; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(q[j+lj]*conjf(q[j+lj]) + q[j+l1]*conjf(q[j+l1])); wp += (double) at1; } /* mode numbers kx = 0, nx/2 */ at1 = crealf(ffc[ll])*cimagf(ffc[ll]); at4 = dkz*at1; zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I; fxyz[4*lj] = zero; fxyz[1+4*lj] = zero; fxyz[2+4*lj] = at4*zt1; fxyz[4*(k1+lj)] = zero; fxyz[1+4*(k1+lj)] = zero; fxyz[2+4*(k1+lj)] = zero; fxyz[4*l1] = zero; fxyz[1+4*l1] = zero; fxyz[2+4*l1] = zero; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[lj]*conjf(q[lj])); wp += (double) at1; /* sum1 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum1 += (wp + dd[0]); } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum2 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \ reduction(+:sum2) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = 
dky*at1; zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; fxyz[4*(j+kj)] = at2*zt1; fxyz[1+4*(j+kj)] = at3*zt1; fxyz[2+4*(j+kj)] = zero; fxyz[4*(j+k1)] = at2*zt2; fxyz[1+4*(j+k1)] = -at3*zt2; fxyz[2+4*(j+k1)] = zero; fxyz[4*(j+kj+l1)] = zero; fxyz[1+4*(j+kj+l1)] = zero; fxyz[2+4*(j+kj+l1)] = zero; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); wp += (double) at1; } sum2 += wp; } /* mode numbers kx = 0, nx/2 */ wp = 0.0; v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = crealf(ffc[kk])*cimagf(ffc[kk]); at3 = at1*dny*(float) k; zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I; fxyz[4*kj] = zero; fxyz[1+4*kj] = at3*zt1; fxyz[2+4*kj] = zero; fxyz[4*k1] = zero; fxyz[1+4*k1] = zero; fxyz[2+4*k1] = zero; fxyz[4*(kj+l1)] = zero; fxyz[1+4*(kj+l1)] = zero; fxyz[2+4*(kj+l1)] = zero; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[kj]*conjf(q[kj])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j])*cimagf(ffc[j]); */ v_at1 = _mm512_load_ps((float *)&ffc[j]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),v_zero, v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),v_zero); } /* fxyz[4*j] = at2*zt1; */ 
/* fxyz[1+4*j] = zero; */ /* fxyz[2+4*j] = zero; */ a = _mm512_mul_ps(v_at2,v_zt1); b = v_zero; c = v_zero; /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),v_zero, 177); b = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),v_zero, 177); d = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*j],a); _mm512_store_ps((float *)&fxyz[8+4*j],b); _mm512_store_ps((float *)&fxyz[16+4*j],c); _mm512_store_ps((float *)&fxyz[24+4*j],d); /* fxyz[4*(j+k1)] = zero; */ /* fxyz[1+4*(j+k1)] = zero; */ /* fxyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1)],v_zero); /* fxyz[4*(j+l1)] = zero; */ /* fxyz[1+4*(j+l1)] = zero; */ /* fxyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+l1)],v_zero); /* fxyz[4*(j+k1+l1)] = zero; */ /* fxyz[1+4*(j+k1+l1)] = zero; */ /* fxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero); /* wp += at1*(q[j]*conjf(q[j])); */ v_zt3 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_zt1,v_zt1)); /* convert to double 
precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j])*cimagf(ffc[j]); at2 = at1*dnx*(float) j; zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; fxyz[4*j] = at2*zt1; fxyz[1+4*j] = zero; fxyz[2+4*j] = zero; fxyz[4*(j+k1)] = zero; fxyz[1+4*(j+k1)] = zero; fxyz[2+4*(j+k1)] = zero; fxyz[4*(j+l1)] = zero; fxyz[1+4*(j+l1)] = zero; fxyz[2+4*(j+l1)] = zero; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(q[j]*conjf(q[j])); wp += (double) at1; } fxyz[0] = zero; fxyz[1] = zero; fxyz[2] = zero; fxyz[4*k1] = zero; fxyz[1+4*k1] = zero; fxyz[2+4*k1] = zero; fxyz[4*l1] = zero; fxyz[1+4*l1] = zero; fxyz[2+4*l1] = zero; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; /* sum2 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (wp + dd[0]); /* *we = wp*((float) nx)*((float) ny)*((float) nz); */ *we = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void cknccuperp3(float complex cu[], int nx, int ny, int nz, int nxvh, int nyv, int nzv) { /* this subroutine calculates the transverse current in fourier space input: all, output: cu approximate flop count is: 100*nxc*nyc*nzc + 36*(nxc*nyc + nxc*nzc + nyc*nzc) and (nx/2)*nyc*nzc divides where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the transverse current is calculated using the equation: cux[kz][ky][kx] = cux[kz][ky][kx] - kx*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx] + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz) cuy([kz][ky][kx] = cuy[kz][ky][kx] - ky*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx] + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz) cuz[kz][ky][kx] = cuz[kz][ky][kx] - kz*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx] + 
kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz) where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, except for cux(kx=pi) = cuy(kx=pi) = cuz(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = cux(ky=pi) = 0, cux(kz=pi) = cuy(kz=pi) = cuz(kz=pi) = 0, cux(kx=0,ky=0,kz=0) = cuy(kx=0,ky=0,kz=0) = cuz(kx=0,ky=0,kz=0) = 0. cu[l][k][j][i] = complex current density for fourier mode (j,k,l) nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz requires KNC, cu need to be 64 byte aligned nxhd needs to be a multiple of 8 nxvh needs to be a multiple of 2 cu needs to have 4 components local data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kj, lj, nxvyh; float dnx, dny, dnz, dkx, dky, dkz, dky2, dkz2, dkyz2, at1; float complex zero, zt1; __m512i v_j, v_it; __m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_dkz2, v_dkyz2; __m512 v_dk, v_at1, v_zt1, v_zt2, v_zero, v_one, v_at, v_as; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 2*(nxh/2); itn = 1 > nxhs ? 
1 : nxhs; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_one = _mm512_set1_ps(1.0f); /* calculate transverse part of current */ /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l), _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); lj = nxvyh*l; l1 = nxvyh*nz - lj; dkz2 = dkz*dkz; v_dkz2 = _mm512_set1_ps(dkz2); /* add kz to gradient operator */ v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkz); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kj = nxvh*k; k1 = nxvh*ny - kj; dkyz2 = dky*dky + dkz2; v_dkyz2 = _mm512_fmadd_ps(v_dky,v_dky,v_dkz2); /* add ky to gradient operator */ v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* at1 = 1.0/(dkx*dkx + dkyz2); */ v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2); v_at1 = _mm512_div_ps(v_one,v_at1); /* add kx to gradient operator */ v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx); /* zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)] */ /* + dkz*cu[2+4*(j+kj+lj)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]); v_zt1 = _mm512_mul_ps(v_dk,v_zt2); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = 
_mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); } /* cu[4*(j+kj+lj)] -= dkx*zt1; */ /* cu[1+4*(j+kj+lj)] -= dky*zt1; */ /* cu[2+4*(j+kj+lj)] -= dkz*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1)); _mm512_store_ps((float *)&cu[4*(j+kj+lj)],v_zt2); /* zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)] */ /* + dkz*cu[2+4*(j+k1+lj)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]); v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero, v_dk); v_zt1 = _mm512_mul_ps(v_as,v_zt2); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = _mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); } /* cu[4*(j+k1+lj)] -= dkx*zt1; */ /* cu[1+4*(j+k1+lj)] += dky*zt1; */ /* cu[2+4*(j+k1+lj)] -= dkz*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1)); _mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zt2); /* zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)] */ /* - dkz*cu[2+4*(j+kj+l1)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]); v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336), v_zero,v_dk); v_zt1 = _mm512_mul_ps(v_as,v_zt2); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = _mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); } /* cu[4*(j+kj+l1)] -= dkx*zt1; */ /* cu[1+4*(j+kj+l1)] -= dky*zt1; */ /* cu[2+4*(j+kj+l1)] += dkz*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1)); _mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zt2); /* zt1 = at1*(dkx*cu[4*(j+k1+l1)] - 
dky*cu[1+4*(j+k1+l1)] */ /* - dkz*cu[2+4*(j+k1+l1)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]); v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(15420), v_zero,v_dk); v_zt1 = _mm512_mul_ps(v_as,v_zt2); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = _mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); } /* cu[4*(j+k1+l1)] -= dkx*zt1; */ /* cu[1+4*(j+k1+l1)] += dky*zt1; */ /* cu[2+4*(j+k1+l1)] += dkz*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1)); _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; at1 = 1.0/(dkx*dkx + dkyz2); zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)] + dkz*cu[2+4*(j+kj+lj)]); cu[4*(j+kj+lj)] -= dkx*zt1; cu[1+4*(j+kj+lj)] -= dky*zt1; cu[2+4*(j+kj+lj)] -= dkz*zt1; zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)] + dkz*cu[2+4*(j+k1+lj)]); cu[4*(j+k1+lj)] -= dkx*zt1; cu[1+4*(j+k1+lj)] += dky*zt1; cu[2+4*(j+k1+lj)] -= dkz*zt1; zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)] - dkz*cu[2+4*(j+kj+l1)]); cu[4*(j+kj+l1)] -= dkx*zt1; cu[1+4*(j+kj+l1)] -= dky*zt1; cu[2+4*(j+kj+l1)] += dkz*zt1; zt1 = at1*(dkx*cu[4*(j+k1+l1)] - dky*cu[1+4*(j+k1+l1)] - dkz*cu[2+4*(j+k1+l1)]); cu[4*(j+k1+l1)] -= dkx*zt1; cu[1+4*(j+k1+l1)] += dky*zt1; cu[2+4*(j+k1+l1)] += dkz*zt1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kj = nxvh*k; k1 = nxvh*ny - kj; dky = dny*(float) k; at1 = 1.0/(dky*dky + dkz2); zt1 = at1*(dky*cu[1+4*(kj+lj)] + dkz*cu[2+4*(kj+lj)]); cu[1+4*(kj+lj)] -= dky*zt1; cu[2+4*(kj+lj)] -= dkz*zt1; cu[4*(k1+lj)] = zero; cu[1+4*(k1+lj)] = zero; cu[2+4*(k1+lj)] = zero; zt1 = at1*(dky*cu[1+4*(kj+l1)] - dkz*cu[2+4*(kj+l1)]); cu[1+4*(kj+l1)] -= dky*zt1; cu[2+4*(kj+l1)] += dkz*zt1; cu[4*(k1+l1)] = zero; cu[1+4*(k1+l1)] = zero; 
cu[2+4*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* add ky to gradient operator */ v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_zero); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* at1 = 1.0/(dkx*dkx + dkz2); */ v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkz2); v_at1 = _mm512_div_ps(v_one,v_at1); /* add kx to gradient operator */ v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx); /* zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]); v_zt1 = _mm512_mul_ps(v_dk,v_zt2); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = _mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); } /* cu[4*(j+lj)] -= dkx*zt1; */ /* cu[2+4*(j+lj)] -= dkz*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1)); _mm512_store_ps((float *)&cu[4*(j+lj)],v_zt2); /* cu[4*(j+k1+lj)] = zero; */ /* cu[1+4*(j+k1+lj)] = zero; */ /* cu[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zero); /* zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]); v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336), v_zero,v_dk); v_zt1 = _mm512_mul_ps(v_as,v_zt2); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = _mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); } /* cu[4*(j+l1)] -= dkx*zt1; */ /* cu[2+4*(j+l1)] += dkz*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1)); 
_mm512_store_ps((float *)&cu[4*(j+l1)],v_zt2); /* cu[4*(j+k1+l1)] = zero; */ /* cu[1+4*(j+k1+l1)] = zero; */ /* cu[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; at1 = 1.0/(dkx*dkx + dkz2); zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]); cu[4*(j+lj)] -= dkx*zt1; cu[2+4*(j+lj)] -= dkz*zt1; cu[4*(j+k1+lj)] = zero; cu[1+4*(j+k1+lj)] = zero; cu[2+4*(j+k1+lj)] = zero; zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]); cu[4*(j+l1)] -= dkx*zt1; cu[2+4*(j+l1)] += dkz*zt1; cu[4*(j+k1+l1)] = zero; cu[1+4*(j+k1+l1)] = zero; cu[2+4*(j+k1+l1)] = zero; } /* mode numbers kx = 0, nx/2 */ cu[2+4*lj] = zero; cu[4*(k1+lj)] = zero; cu[1+4*(k1+lj)] = zero; cu[2+4*(k1+lj)] = zero; cu[4*l1] = zero; cu[1+4*l1] = zero; cu[2+4*l1] = zero; cu[4*(k1+l1)] = zero; cu[1+4*(k1+l1)] = zero; cu[2+4*(k1+l1)] = zero; } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kj = nxvh*k; k1 = nxvh*ny - kj; dky2 = dky*dky; v_dkyz2 = _mm512_mul_ps(v_dky,v_dky); /* add ky to gradient operator */ v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* at1 = 1.0/(dkx*dkx + dky2); */ v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2); v_at1 = _mm512_div_ps(v_one,v_at1); /* add kx to gradient operator */ v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx); /* zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]); v_zt1 = _mm512_mul_ps(v_dk,v_zt2); v_at = 
(__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = _mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); } /* cu[4*(j+kj)] -= dkx*zt1; */ /* cu[1+4*(j+kj)] -= dky*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1)); _mm512_store_ps((float *)&cu[4*(j+kj)],v_zt2); /* zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]); */ v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]); v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero, v_dk); v_zt1 = _mm512_mul_ps(v_as,v_zt2); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78); v_zt1 = _mm512_add_ps(v_at,v_zt1); v_at = _mm512_permute4f128_ps(v_zt1,177); v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1)); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero);; } /* cu[4*(j+k1)] -= dkx*zt1; */ /* cu[1+4*(j+k1)] += dky*zt1; */ v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1)); _mm512_store_ps((float *)&cu[4*(j+k1)],v_zt2); /* cu[4*(j+kj+l1)] = zero; */ /* cu[1+4*(j+kj+l1)] = zero; */ /* cu[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zero); /* cu[4*(j+k1+l1)] = zero; */ /* cu[1+4*(j+k1+l1)] = zero; */ /* cu[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; at1 = 1.0/(dkx*dkx + dky2); zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]); cu[4*(j+kj)] -= dkx*zt1; cu[1+4*(j+kj)] -= dky*zt1; zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]); cu[4*(j+k1)] -= dkx*zt1; cu[1+4*(j+k1)] += dky*zt1; cu[4*(j+kj+l1)] = zero; cu[1+4*(j+kj+l1)] = zero; cu[2+4*(j+kj+l1)] = zero; cu[4*(j+k1+l1)] = zero; cu[1+4*(j+k1+l1)] = zero; cu[2+4*(j+k1+l1)] = zero; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kj = nxvh*k; k1 = nxvh*ny - kj; cu[1+4*kj] = 
zero; cu[4*k1] = zero; cu[1+4*k1] = zero; cu[2+4*k1] = zero; cu[4*(kj+l1)] = zero; cu[1+4*(kj+l1)] = zero; cu[2+4*(kj+l1)] = zero; cu[4*(k1+l1)] = zero; cu[1+4*(k1+l1)] = zero; cu[2+4*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { v_zt2 = _mm512_load_ps((float *)&cu[4*j]); /* zero out kx = 0 mode */ if (j==0) { v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(255),v_zero); } /* cu[4*j] = zero; */ v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(771),v_zero); _mm512_store_ps((float *)&cu[4*j],v_zt2); /* cu[4*(j+k1)] = zero; */ /* cu[1+4*(j+k1)] = zero; */ /* cu[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&cu[4*(j+k1)],v_zero); /* cu[4*(j+l1)] = zero; */ /* cu[1+4*(j+l1)] = zero; */ /* cu[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&cu[4*(j+l1)],v_zero); /* cu[4*(j+k1+l1)] = zero; */ /* cu[1+4*(j+k1+l1)] = zero; */ /* cu[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { cu[4*j] = zero; cu[4*(j+k1)] = zero; cu[1+4*(j+k1)] = zero; cu[2+4*(j+k1)] = zero; cu[4*(j+l1)] = zero; cu[1+4*(j+l1)] = zero; cu[2+4*(j+l1)] = zero; cu[4*(j+k1+l1)] = zero; cu[1+4*(j+k1+l1)] = zero; cu[2+4*(j+k1+l1)] = zero; } cu[0] = zero; cu[1] = zero; cu[2] = zero; cu[4*k1] = zero; cu[1+4*k1] = zero; cu[2+4*k1] = zero; cu[4*l1] = zero; cu[1+4*l1] = zero; cu[2+4*l1] = zero; cu[4*(k1+l1)] = zero; cu[1+4*(k1+l1)] = zero; cu[2+4*(k1+l1)] = zero; return; } /*--------------------------------------------------------------------*/ void ckncibpois33(float complex cu[], float complex bxyz[], float complex ffc[], float ci, float *wm, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d poisson's equation in fourier space for magnetic field with periodic boundary conditions. 
input: cu,ffc,ci,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd output: bxyz, wm approximate flop count is: 193*nxc*nyc*nzc + 84*(nxc*nyc + nxc*nzc + nyc*nzc) where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the magnetic field is calculated using the equations: bx[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (ky*cuz[kz][ky][kx]-kz*cuy[kz][ky][kx]), by[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (kz*cux[kz][ky][kx]-kx*cuz[kz][ky][kx]), bz[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (kx*cuy[kz][ky][kx]-ky*cux[kz][ky][kx]), where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s(kx,ky,kz), s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for bx(kx=pi) = by(kx=pi) = bz(kx=pi) = 0, bx(ky=pi) = by(ky=pi) = bz(ky=pi) = 0, bx(kz=pi) = by(kz=pi) = bz(kz=pi) = 0, bx(kx=0,ky=0,kz=0) = by(kx=0,ky=0,kz=0) = bz(kx=0,ky=0,kz=0) = 0. cu[l][k][j][i] = complex current density for fourier mode (j,k,l) bxyz[l][k][j][i] = i component of complex magnetic field all for fourier mode (j,k,l) aimag(ffc(j,k,l)) = finite-size particle shape factor s for fourier mode (j,k,l) real(ffc(j,k,l)) = potential green's function g for fourier mode (j,k,l) ci = reciprocal of velocity of light magnetic field energy is also calculated, using wm = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*ci*ci |cu[kz][ky][kx]*s[kz][ky][kx]|**2) this expression is valid only if the current is divergence-free nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz nxhd = dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh nzhd = third dimension of form factor array, must be >= nzh requires KNC, cu, bxyz, ffc need to be 64 byte aligned nxhd needs to be a multiple of 8 nxvh needs to be a multiple of 2 cu, bxyz need to have 4 components local 
data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float dnx, dny, dnz, dky, dkz, ci2, at1, at2, at3, at4; float complex zero, zt1, zt2, zt3; double wp, d0; __m512i v_j, v_it, v_n, v_m; __m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_ci2; __m512 v_dk1, v_dk2, v_at1, v_at2, v_at3, v_at4, v_zero; __m512 v_zt1, v_zt2, v_zt3, v_zt4; __m512d v_wp, v_d; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 2*(nxh/2); itn = 1 > nxhs ? 1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; ci2 = ci*ci; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4); v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2); /* NOTE(review): v_n/v_m appear to be lane-permutation tables realizing the cross-product component shuffles, and the int2mask constants (255,771,3084,12336,...) appear to select component lanes of the two packed 4-component complex elements per 512-bit register -- verify against the cu/bxyz data layout */ v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_ci2 = _mm512_set1_ps(ci2); /* calculate magnetic field and sum field energy */ wp = 0.0; v_wp = _mm512_set1_pd(0.0); /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l), _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; /* add kz to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336), v_dky); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771), v_dky); /* vector loop over 
elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j+kk+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* at2 = at1*dnx*(float) j; */ /* at3 = dky*at1; */ /* at4 = dkz*at1; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j+kk+ll]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*(j+kj+lj)]) */ /* + crealf(cu[2+4*(j+kj+lj)])*_Complex_I;/ */ /* zt2 = -cimagf(cu[1+4*(j+kj+lj)]) */ /* + crealf(cu[1+4*(j+kj+lj)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+kj+lj)]) */ /* + crealf(cu[4*(j+kj+lj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2; */ /* bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = 
(__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt1); /* wp += at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)]) */ /* + cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)]) */ /* + cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)])); */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* zt1 = -cimagf(cu[2+4*(j+k1+lj)]) */ /* + crealf(cu[2+4*(j+k1+lj)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+k1+lj)]) */ /* + crealf(cu[1+4*(j+k1+lj)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+k1+lj)]) */ /* + crealf(cu[4*(j+k1+lj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336), v_zero,v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771), v_zero,v_at3); /* bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2; */ /* bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt1); /* wp += at1*(cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)]) */ /* + cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)]) */ /* + cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* zt1 = 
-cimagf(cu[2+4*(j+kj+l1)]) */ /* + crealf(cu[2+4*(j+kj+l1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+kj+l1)]) */ /* + crealf(cu[1+4*(j+kj+l1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+kj+l1)]) */ /* + crealf(cu[4*(j+kj+l1)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771), v_zero,v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084), v_zero,v_at3); /* bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2; */ /* bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt1); /* wp += at1*(cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)]) */ /* + cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)]) */ /* + cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* zt1 = -cimagf(cu[2+4*(j+k1+l1)]) */ /* + crealf(cu[2+4*(j+k1+l1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+k1+l1)]) */ /* + crealf(cu[1+4*(j+k1+l1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+k1+l1)]) */ /* + crealf(cu[4*(j+k1+l1)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(13107), v_zero,v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3855), v_zero,v_at3); /* 
bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2; */ /* bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt1); /* wp += at1*(cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)]) */ /* + cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)]) */ /* + cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j+kk+ll]); at2 = at1*dnx*(float) j; at3 = dky*at1; at4 = dkz*at1; at1 = at1*cimagf(ffc[j+kk+ll]); zt1 = -cimagf(cu[2+4*(j+kj+lj)]) + crealf(cu[2+4*(j+kj+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+kj+lj)]) + crealf(cu[1+4*(j+kj+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+kj+lj)]) + crealf(cu[4*(j+kj+lj)])*_Complex_I; bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2; bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1; bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+4*(j+k1+lj)]) + crealf(cu[2+4*(j+k1+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+k1+lj)]) + crealf(cu[1+4*(j+k1+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+k1+lj)]) + crealf(cu[4*(j+k1+lj)])*_Complex_I; bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2; bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1; bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3; zt1 = -cimagf(cu[2+4*(j+kj+l1)]) + crealf(cu[2+4*(j+kj+l1)])*_Complex_I; zt2 = 
-cimagf(cu[1+4*(j+kj+l1)]) + crealf(cu[1+4*(j+kj+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+kj+l1)]) + crealf(cu[4*(j+kj+l1)])*_Complex_I; bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2; bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1; bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+4*(j+k1+l1)]) + crealf(cu[2+4*(j+k1+l1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+k1+l1)]) + crealf(cu[1+4*(j+k1+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+k1+l1)]) + crealf(cu[4*(j+k1+l1)])*_Complex_I; bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2; bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1; bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3; at1 = at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)]) + cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)]) + cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)]) + cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)]) + cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)]) + cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)]) + cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)]) + cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)]) + cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)]) + cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)]) + cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)]) + cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)])); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = ci2*crealf(ffc[kk+ll]); at3 = at1*dny*(float) k; at4 = dkz*at1; at1 = at1*cimagf(ffc[kk+ll]); zt1 = -cimagf(cu[2+4*(kj+lj)]) + crealf(cu[2+4*(kj+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(kj+lj)]) + crealf(cu[1+4*(kj+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(kj+lj)]) + crealf(cu[4*(kj+lj)])*_Complex_I; bxyz[4*(kj+lj)] = at3*zt1 - at4*zt2; bxyz[1+4*(kj+lj)] = at4*zt3; bxyz[2+4*(kj+lj)] = -at3*zt3; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; zt1 = -cimagf(cu[2+4*(kj+l1)]) + crealf(cu[2+4*(kj+l1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(kj+l1)]) + crealf(cu[1+4*(kj+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(kj+l1)]) + crealf(cu[4*(kj+l1)])*_Complex_I; bxyz[4*(kj+l1)] = at3*zt1 + at4*zt2; bxyz[1+4*(kj+l1)] = 
-at4*zt3; bxyz[2+4*(kj+l1)] = -at3*zt3; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; at1 = at1*(cu[4*(kj+lj)]*conjf(cu[4*(kj+lj)]) + cu[1+4*(kj+lj)]*conjf(cu[1+4*(kj+lj)]) + cu[2+4*(kj+lj)]*conjf(cu[2+4*(kj+lj)]) + cu[4*(kj+l1)]*conjf(cu[4*(kj+l1)]) + cu[1+4*(kj+l1)]*conjf(cu[1+4*(kj+l1)]) + cu[2+4*(kj+l1)]*conjf(cu[2+4*(kj+l1)])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15), (float *)&ffc[j+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* at2 = at1*dnx*(float) j; */ /* at4 = dkz*at1; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j+ll]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*(j+lj)]) */ /* + crealf(cu[2+4*(j+lj)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+lj)]) */ /* + crealf(cu[1+4*(j+lj)])*_Complex_I; */ /* zt3 = 
-cimagf(cu[4*(j+lj)]) */ /* + crealf(cu[4*(j+lj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*(j+lj)] = -at4*zt2; */ /* bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+lj)] = at2*zt2; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt1); /* wp += at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)]) */ /* + cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)]) */ /* + cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)]) */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* bxyz[4*(j+k1+lj)] = zero; */ /* bxyz[1+4*(j+k1+lj)] = zero; */ /* bxyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero); /* zt1 = -cimagf(cu[2+4*(j+l1)]) */ /* + crealf(cu[2+4*(j+l1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+l1)]) */ /* + crealf(cu[1+4*(j+l1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+l1)]) */ /* + crealf(cu[4*(j+l1)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771),v_zero, v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084),v_zero, v_at3); /* bxyz[4*(j+l1)] = at4*zt2; */ /* bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+l1)] = at2*zt2; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = 
(__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt1); /* wp += at1*(cu[4*(j+l1)]*conjf(cu[4*(j+l1)]) */ /* + cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)]) */ /* + cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j+ll]); at2 = at1*dnx*(float) j; at4 = dkz*at1; at1 = at1*cimagf(ffc[j+ll]); zt1 = -cimagf(cu[2+4*(j+lj)]) + crealf(cu[2+4*(j+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+lj)]) + crealf(cu[1+4*(j+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+lj)]) + crealf(cu[4*(j+lj)])*_Complex_I; bxyz[4*(j+lj)] = -at4*zt2; bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1; bxyz[2+4*(j+lj)] = at2*zt2; bxyz[4*(j+k1+lj)] = zero; bxyz[1+4*(j+k1+lj)] = zero; bxyz[2+4*(j+k1+lj)] = zero; zt1 = -cimagf(cu[2+4*(j+l1)]) + crealf(cu[2+4*(j+l1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+l1)]) + crealf(cu[1+4*(j+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+l1)]) + crealf(cu[4*(j+l1)])*_Complex_I; bxyz[4*(j+l1)] = at4*zt2; bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1; bxyz[2+4*(j+l1)] = at2*zt2; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)]) + cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)]) + cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)]) + cu[4*(j+l1)]*conjf(cu[4*(j+l1)]) + 
cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)]) + cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)])); wp += (double) at1; } /* mode numbers kx = 0, nx/2 */ at1 = ci2*crealf(ffc[ll]); at4 = dkz*at1; at1 = at1*cimagf(ffc[ll]); zt2 = -cimagf(cu[1+4*(lj)]) + crealf(cu[1+4*(lj)])*_Complex_I; zt3 = -cimagf(cu[4*(lj)]) + crealf(cu[4*(lj)])*_Complex_I; bxyz[4*lj] = -at4*zt2; bxyz[1+4*lj] = at4*zt3; bxyz[2+4*lj] = zero; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; at1 = at1*(cu[4*lj]*conjf(cu[4*lj]) + cu[1+4*lj]*conjf(cu[1+4*lj]) + cu[2+4*lj]*conjf(cu[2+4*lj])); wp += (double) at1; } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j+kk]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j+kk]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15), (float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* at2 = at1*dnx*(float) j; */ /* at3 = dky*at1; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators 
*/ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j+kk]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*(j+kj)]) */ /* + crealf(cu[2+4*(j+kj)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+kj)]) */ /* + crealf(cu[1+4*(j+kj)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+kj)]) */ /* + crealf(cu[4*(j+kj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*(j+kj)] = at3*zt1; */ /* bxyz[1+4*(j+kj)] = -at2*zt1; */ /* bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt1); /* wp += at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)]) */ /* + cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)]) */ /* + cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)])); */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* zt1 = -cimagf(cu[2+4*(j+k1)]) */ /* + crealf(cu[2+4*(j+k1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+k1)]) */ /* + crealf(cu[1+4*(j+k1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+k1)]) */ /* + crealf(cu[4*(j+k1)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = 
(__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336),v_zero, v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771),v_zero, v_at3); /* bxyz[4*(j+k1)] = -at3*zt1; */ /* bxyz[1+4*(j+k1)] = -at2*zt1; */ /* bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt1); /* wp += at1*(cu[4*(j+k1)]*conjf(cu[4*(j+k1)]) */ /* + cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)]) */ /* + cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+kj+l1)] = zero; */ /* bxyz[1+4*(j+kj+l1)] = zero; */ /* bxyz[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; at1 = at1*cimagf(ffc[j+kk]); zt1 = -cimagf(cu[2+4*(j+kj)]) + crealf(cu[2+4*(j+kj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+kj)]) + crealf(cu[1+4*(j+kj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+kj)]) + crealf(cu[4*(j+kj)])*_Complex_I; bxyz[4*(j+kj)] = at3*zt1; bxyz[1+4*(j+kj)] = -at2*zt1; bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+4*(j+k1)]) + 
crealf(cu[2+4*(j+k1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+k1)]) + crealf(cu[1+4*(j+k1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+k1)]) + crealf(cu[4*(j+k1)])*_Complex_I; bxyz[4*(j+k1)] = -at3*zt1; bxyz[1+4*(j+k1)] = -at2*zt1; bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3; bxyz[4*(j+kj+l1)] = zero; bxyz[1+4*(j+kj+l1)] = zero; bxyz[2+4*(j+kj+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)]) + cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)]) + cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)]) + cu[4*(j+k1)]*conjf(cu[4*(j+k1)]) + cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)]) + cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)])); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = ci2*crealf(ffc[kk]); at3 = at1*dny*(float) k; at1 = at1*cimagf(ffc[kk]); zt1 = -cimagf(cu[2+4*(kj)]) + crealf(cu[2+4*(kj)])*_Complex_I; zt3 = -cimagf(cu[4*(kj)]) + crealf(cu[4*(kj)])*_Complex_I; bxyz[4*kj] = at3*zt1; bxyz[1+4*kj] = zero; bxyz[2+4*kj] = -at3*zt3; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; bxyz[4*(kj+l1)] = zero; bxyz[1+4*(kj+l1)] = zero; bxyz[2+4*(kj+l1)] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; at1 = at1*(cu[4*kj]*conjf(cu[4*kj]) + cu[1+4*kj]*conjf(cu[1+4*kj]) + cu[2+4*kj]*conjf(cu[2+4*kj])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15), (float *)&ffc[j+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* 
at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*j]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*j] = zero; */ /* bxyz[1+4*j] = -at2*zt1; */ /* bxyz[2+4*j] = at2*zt2; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),v_zero); } _mm512_store_ps((float *)&bxyz[4*j],v_zt1); /* wp += at1*(cu[4*j]*conjf(cu[4*j]) */ /* + cu[1+4*j]*conjf(cu[1+4*j]) */ /* + cu[2+4*j]*conjf(cu[2+4*j])); */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1)] = zero; */ /* bxyz[1+4*(j+k1)] = zero; */ /* bxyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float 
*)&bxyz[4*(j+k1)],v_zero); /* bxyz[4*(j+l1)] = zero; */ /* bxyz[1+4*(j+l1)] = zero; */ /* bxyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j]); at2 = at1*dnx*(float) j; at1 = at1*cimagf(ffc[j]); zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I; zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I; bxyz[4*j] = zero; bxyz[1+4*j] = -at2*zt1; bxyz[2+4*j] = at2*zt2; bxyz[4*(j+k1)] = zero; bxyz[1+4*(j+k1)] = zero; bxyz[2+4*(j+k1)] = zero; bxyz[4*(j+l1)] = zero; bxyz[1+4*(j+l1)] = zero; bxyz[2+4*(j+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(cu[4*j]*conjf(cu[4*j]) + cu[1+4*j]*conjf(cu[1+4*j]) + cu[2+4*j]*conjf(cu[2+4*j])); wp += (double) at1; } bxyz[0] = zero; bxyz[1] = zero; bxyz[2] = zero; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; /* combine the vector and scalar partial sums of the field energy */ d0 = _mm512_reduce_add_pd(v_wp); *wm = (wp + d0)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void ckncmaxwel3(float complex exyz[], float complex bxyz[], float complex cu[], float complex ffc[], float ci, float dt, float *wf, float *wm, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d maxwell's equation in fourier space for transverse electric and magnetic fields with periodic boundary conditions. 
input: all, output: wf, wm, exyz, bxyz approximate flop count is: 680*nxc*nyc*nzc + 149*(nxc*nyc + nxc*nzc + nyc*nzc) plus nxc*nyc*nzc divides where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the magnetic field is first updated half a step using the equations: bx[kz][ky][kx] = bx[kz][ky][kx] - .5*dt*sqrt(-1)* (ky*ez[kz][ky][kx]-kz*ey[kz][ky][kx]) by[kz][ky][kx] = by[kz][ky][kx] - .5*dt*sqrt(-1)* (kz*ex[kz][ky][kx]-kx*ez[kz][ky][kx]) bz[kz][ky][kx] = bz[kz][ky][kx] - .5*dt*sqrt(-1)* (kx*ey[kz][ky][kx]-ky*ex[kz][ky][kx]) the electric field is then updated a whole step using the equations: ex[kz][ky][kx] = ex[kz][ky][kx] + c2*dt*sqrt(-1) *(ky*bz[kz][ky][kx]-kz*by[kz][ky][kx]) - affp*dt*cux[kz][ky][kx]*s[kz][ky][kx] ey[kz][ky][kx] = ey[kz][ky][kx] + c2*dt*sqrt(-1) *(kz*bx[kz][ky][kx]-kx*bz[kz][ky][kx]) - affp*dt*cuy[kz][ky][kx]*s[kz][ky][kx] ez[kz][ky][kx] = ez[kz][ky][kx] + c2*dt*sqrt(-1) *(kx*by[kz][ky][kx]-ky*bx[kz][ky][kx]) - affp*dt*cuz[kz][ky][kx]*s[kz][ky][kx] the magnetic field is finally updated the remaining half step with the new electric field and the previous magnetic field equations. where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, c2 = 1./(ci*ci) and s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2) j,k,l = fourier mode numbers, except for ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0, ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0, ex(kz=pi) = ey(kz=pi) = ez(kz=pi) = 0, ex(kx=0,ky=0,kz=0) = ey(kx=0,ky=0,kz=0) = ez(kx=0,ky=0,kz=0) = 0. and similarly for bx, by, bz. 
cu[l][k][j][i] = complex current density exyz[l][k][j][i] = complex transverse electric field bxyz[l][k][j][i] = complex magnetic field for component i, all for fourier mode (j1,k,l) real(ffc[0][0][0]) = affp = normalization constant = nx*ny*nz/np, where np=number of particles aimag(ffc[l][k][j]) = finite-size particle shape factor s, s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2) for fourier mode (j,k,l) ci = reciprocal of velocity of light dt = time interval between successive calculations transverse electric field energy is also calculated, using wf = nx*ny*nz**sum((1/affp)*|exyz[kz][ky][kx]|**2) magnetic field energy is also calculated, using wm = nx*ny*nz**sum((c2/affp)*|bxyz[kz][ky][kx]|**2) nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz nxhd = second dimension of form factor array, must be >= nxh nyhd = third dimension of form factor array, must be >= nyh nzhd = fourth dimension of form factor array, must be >= nzh requires KNC, cu, exyz, bxyz, ffc need to be 64 byte aligned nxhd needs to be a multiple of 8 nxvh needs to be a multiple of 2 cu, exyz, bxyz needs to have 4 components local data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float dnx, dny, dnz, dth, c2, cdt, affp, anorm, dkx, dky, dkz; float adt, afdt; float at1; float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9; double wp, ws, d0; __m512i v_j, v_it, v_n, v_m; __m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz; __m512 v_zero, v_cdt, v_adt, v_afdt, v_dth, v_anorm; __m512 v_dk1, v_dk2, v_at1, v_at2, v_at3, v_at4; __m512 v_zt1, v_zt2, v_zt3, v_zt4, v_zt5, v_zt6, v_zt7; __m512d v_wp, v_ws, v_d; if (ci <= 0.0) return; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 2*(nxh/2); itn = 1 > nxhs ? 
1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; dth = 0.5*dt; c2 = 1.0/(ci*ci); cdt = c2*dt; affp = creal(ffc[0]); adt = affp*dt; zero = 0.0 + 0.0*_Complex_I; anorm = 1.0/affp; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4); v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2); v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_cdt = _mm512_set1_ps(cdt); v_adt = _mm512_set1_ps(adt); v_dth = _mm512_set1_ps(dth); v_anorm = _mm512_set1_ps(anorm); /* update electromagnetic field and sum field energies */ ws = 0.0; wp = 0.0; v_wp = _mm512_set1_pd(0.0); v_ws = _mm512_set1_pd(0.0); /* calculate the electromagnetic fields */ /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l), _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; /* add kz to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336), v_dky); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771), v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = 
_mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* afdt = adt*cimagf(ffc[j+kk+ll]); */ v_afdt = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step, ky > 0, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+kj+lj)]) */ /* + crealf(exyz[2+4*(j+kj+lj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+kj+lj)]) */ /* + crealf(exyz[1+4*(j+kj+lj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+kj+lj)]) */ /* + crealf(exyz[4*(j+kj+lj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2); */ /* zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1); */ /* zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+lj)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = 
(__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2) */ /* - afdt*cu[4*(j+kj+lj)]; */ /* zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1) */ /* - afdt*cu[1+4*(j+kj+lj)]; */ /* zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3) */ /* - afdt*cu[2+4*(j+kj+lj)]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+kj+lj)] = zt7; */ /* exyz[1+4*(j+kj+lj)] = zt8; */ /* exyz[2+4*(j+kj+lj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+kj+lj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+kj+lj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt4 -= dth*(dky*zt1 - dkz*zt2); */ /* zt5 -= dth*(dkz*zt3 - dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+kj+lj)] = zt4; */ 
/* bxyz[1+4*(j+kj+lj)] = zt5; */ /* bxyz[2+4*(j+kj+lj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+kj+lj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* update magnetic field half time step, ky < 0, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+k1+lj)]) */ /* + crealf(exyz[2+4*(j+k1+lj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+k1+lj)]) */ /* + crealf(exyz[1+4*(j+k1+lj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+k1+lj)]) */ /* + crealf(exyz[4*(j+k1+lj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336), v_zero,v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771), v_zero,v_dk2); /* zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2); */ /* zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1); */ /* zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+lj)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2) */ /* - 
afdt*cu[4*(j+k1+lj)]; */ /* zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1) */ /* - afdt*cu[1+4*(j+k1+lj)]; */ /* zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3) */ /* - afdt*cu[2+4*(j+k1+lj)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+k1+lj)] = zt7; */ /* exyz[1+4*(j+k1+lj)] = zt8; */ /* exyz[2+4*(j+k1+lj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+k1+lj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 += dth*(dky*zt1 + dkz*zt2); */ /* zt5 -= dth*(dkz*zt3 - dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+k1+lj)] = zt4; */ /* bxyz[1+4*(j+k1+lj)] = zt5; */ /* bxyz[2+4*(j+k1+lj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) 
{ v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+k1+lj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* update magnetic field half time step, ky > 0, kz < 0 */ /* zt1 = -cimagf(exyz[2+4*(j+kj+l1)]) */ /* + crealf(exyz[2+4*(j+kj+l1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+kj+l1)]) */ /* + crealf(exyz[1+4*(j+kj+l1)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+kj+l1)]) */ /* + crealf(exyz[4*(j+kj+l1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771), v_zero,v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084), v_zero,v_dk2); /* zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2); */ /* zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1); */ /* zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+l1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2) */ /* - afdt*cu[4*(j+kj+l1)]; */ /* zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1) */ /* - 
afdt*cu[1+4*(j+kj+l1)]; */ /* zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3) */ /* - afdt*cu[2+4*(j+kj+l1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+kj+l1)] = zt7; */ /* exyz[1+4*(j+kj+l1)] = zt8; */ /* exyz[2+4*(j+kj+l1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+kj+l1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 -= dth*(dky*zt1 + dkz*zt2); */ /* zt5 += dth*(dkz*zt3 + dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+kj+l1)] = zt4; */ /* bxyz[1+4*(j+kj+l1)] = zt5; */ /* bxyz[2+4*(j+kj+l1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); 
_mm512_mask_store_ps((float *)&bxyz[4*(j+kj+l1)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* update magnetic field half time step, ky < 0, kz < 0 */ /* zt1 = -cimagf(exyz[2+4*(j+k1+l1)]) */ /* + crealf(exyz[2+4*(j+k1+l1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+k1+l1)]) */ /* + crealf(exyz[1+4*(j+k1+l1)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+k1+l1)]) */ /* + crealf(exyz[4*(j+k1+l1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(13107), v_zero,v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3855), v_zero,v_dk2); /* zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2); */ /* zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1); */ /* zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+l1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2) */ /* - afdt*cu[4*(j+k1+l1)]; */ /* zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1) */ /* - afdt*cu[1+4*(j+k1+l1)]; */ /* zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + 
dky*zt3) */ /* - afdt*cu[2+4*(j+k1+l1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+k1+l1)] = zt7; */ /* exyz[1+4*(j+k1+l1)] = zt8; */ /* exyz[2+4*(j+k1+l1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+k1+l1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 += dth*(dky*zt1 - dkz*zt2); */ /* zt5 += dth*(dkz*zt3 + dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+k1+l1)] = zt4; */ /* bxyz[1+4*(j+k1+l1)] = zt5; */ /* bxyz[2+4*(j+k1+l1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+k1+l1)], _mm512_int2mask(65280),v_zt5); } else { 
_mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+kk+ll]); /* update magnetic field half time step, ky > 0, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+kj+lj)]) + crealf(exyz[2+4*(j+kj+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+kj+lj)]) + crealf(exyz[1+4*(j+kj+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+kj+lj)]) + crealf(exyz[4*(j+kj+lj)])*_Complex_I; zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2) - afdt*cu[4*(j+kj+lj)]; zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+4*(j+kj+lj)]; zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+4*(j+kj+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+kj+lj)] = zt7; exyz[1+4*(j+kj+lj)] = zt8; exyz[2+4*(j+kj+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 - dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[4*(j+kj+lj)] = zt4; 
bxyz[1+4*(j+kj+lj)] = zt5; bxyz[2+4*(j+kj+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; /* update magnetic field half time step, ky < 0, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+k1+lj)]) + crealf(exyz[2+4*(j+k1+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+k1+lj)]) + crealf(exyz[1+4*(j+k1+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+k1+lj)]) + crealf(exyz[4*(j+k1+lj)])*_Complex_I; zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2) - afdt*cu[4*(j+k1+lj)]; zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+4*(j+k1+lj)]; zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+4*(j+k1+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+k1+lj)] = zt7; exyz[1+4*(j+k1+lj)] = zt8; exyz[2+4*(j+k1+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dky*zt1 + dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[4*(j+k1+lj)] = zt4; bxyz[1+4*(j+k1+lj)] = zt5; bxyz[2+4*(j+k1+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; /* update magnetic field half time step, ky > 0, kz < 0 */ zt1 = -cimagf(exyz[2+4*(j+kj+l1)]) + crealf(exyz[2+4*(j+kj+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+kj+l1)]) + crealf(exyz[1+4*(j+kj+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+kj+l1)]) + crealf(exyz[4*(j+kj+l1)])*_Complex_I; zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + 
dkx*zt1); zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2) - afdt*cu[4*(j+kj+l1)]; zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+4*(j+kj+l1)]; zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+4*(j+kj+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+kj+l1)] = zt7; exyz[1+4*(j+kj+l1)] = zt8; exyz[2+4*(j+kj+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 + dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[4*(j+kj+l1)] = zt4; bxyz[1+4*(j+kj+l1)] = zt5; bxyz[2+4*(j+kj+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; /* update magnetic field half time step, ky < 0, kz < 0 */ zt1 = -cimagf(exyz[2+4*(j+k1+l1)]) + crealf(exyz[2+4*(j+k1+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+k1+l1)]) + crealf(exyz[1+4*(j+k1+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+k1+l1)]) + crealf(exyz[4*(j+k1+l1)])*_Complex_I; zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2) - afdt*cu[4*(j+k1+l1)]; zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+4*(j+k1+l1)]; zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+4*(j+k1+l1)]; /* update magnetic field half time step and store 
electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+k1+l1)] = zt7; exyz[1+4*(j+k1+l1)] = zt8; exyz[2+4*(j+k1+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dky*zt1 - dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[4*(j+k1+l1)] = zt4; bxyz[1+4*(j+k1+l1)] = zt5; bxyz[2+4*(j+k1+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; afdt = adt*cimagf(ffc[kk+ll]); /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+4*(kj+lj)]) + crealf(exyz[2+4*(kj+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(kj+lj)]) + crealf(exyz[1+4*(kj+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(kj+lj)]) + crealf(exyz[4*(kj+lj)])*_Complex_I; zt4 = bxyz[4*(kj+lj)] - dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+4*(kj+lj)] - dth*(dkz*zt3); zt6 = bxyz[2+4*(kj+lj)] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(kj+lj)] + cdt*(dky*zt1 - dkz*zt2) - afdt*cu[4*(kj+lj)]; zt8 = exyz[1+4*(kj+lj)] + cdt*(dkz*zt3) - afdt*cu[1+4*(kj+lj)]; zt9 = exyz[2+4*(kj+lj)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(kj+lj)] = zt7; exyz[1+4*(kj+lj)] = zt8; exyz[2+4*(kj+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 - dkz*zt2); zt5 -= dth*(dkz*zt3); zt6 += dth*(dky*zt3); bxyz[4*(kj+lj)] = zt4; bxyz[1+4*(kj+lj)] = zt5; 
bxyz[2+4*(kj+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; exyz[4*(k1+lj)] = zero; exyz[1+4*(k1+lj)] = zero; exyz[2+4*(k1+lj)] = zero; /* update magnetic field half time step, kz < 0 */ zt1 = -cimagf(exyz[2+4*(kj+l1)]) + crealf(exyz[2+4*(kj+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(kj+l1)]) + crealf(exyz[1+4*(kj+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(kj+l1)]) + crealf(exyz[4*(kj+l1)])*_Complex_I; zt4 = bxyz[4*(kj+l1)] - dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+4*(kj+l1)] + dth*(dkz*zt3); zt6 = bxyz[2+4*(kj+l1)] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(kj+l1)] + cdt*(dky*zt1 + dkz*zt2) - afdt*cu[4*(kj+l1)]; zt8 = exyz[1+4*(kj+l1)] - cdt*(dkz*zt3) - afdt*cu[1+4*(kj+l1)]; zt9 = exyz[2+4*(kj+l1)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(kj+l1)] = zt7; exyz[1+4*(kj+l1)] = zt8; exyz[2+4*(kj+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 + dkz*zt2); zt5 += dth*(dkz*zt3); zt6 += dth*(dky*zt3); bxyz[4*(kj+l1)] = zt4; bxyz[1+4*(kj+l1)] = zt5; bxyz[2+4*(kj+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; exyz[2+4*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero); /* vector 
loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* afdt = adt*cimagf(ffc[j+ll]); */ v_afdt = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+ll]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+ll+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+lj)]) */ /* + crealf(exyz[2+4*(j+lj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+lj)]) */ /* + crealf(exyz[1+4*(j+lj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+lj)]) */ /* + crealf(exyz[4*(j+lj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt4 = bxyz[4*(j+lj)] + dth*(dkz*zt2); */ /* zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1); */ /* zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+lj)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = 
-cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[4*(j+lj)]; */ /* zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1) */ /* - afdt*cu[1+4*(j+lj)]; */ /* zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+lj)] = zt7; */ /* exyz[1+4*(j+lj)] = zt8; */ /* exyz[2+4*(j+lj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+lj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+lj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt4 += dth*(dkz*zt2); */ /* zt5 -= dth*(dkz*zt3 - dkx*zt1); */ /* zt6 -= dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = 
_mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+lj)] = zt4; */ /* bxyz[1+4*(j+lj)] = zt5; */ /* bxyz[2+4*(j+lj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+lj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* bxyz[4*(j+k1+lj)] = zero; */ /* bxyz[1+4*(j+k1+lj)] = zero; */ /* bxyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero); /* exyz[4*(j+k1+lj)] = zero; */ /* exyz[1+4*(j+k1+lj)] = zero; */ /* exyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zero); /* update magnetic field half time step, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+l1)]) */ /* + crealf(exyz[2+4*(j+l1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+l1)]) */ /* + crealf(exyz[1+4*(j+l1)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+l1)]) */ /* + crealf(exyz[4*(j+l1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771),v_zero, v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084),v_zero, v_dk2); /* zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2); */ /* zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1); */ /* zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+l1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 
= -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)]; */ /* zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1) */ /* - afdt*cu[1+4*(j+l1)]; */ /* zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+l1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+l1)] = zt7; */ /* exyz[1+4*(j+l1)] = zt8; */ /* exyz[2+4*(j+l1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+l1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+l1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 -= dth*(dkz*zt2); */ /* zt5 += dth*(dkz*zt3 + dkx*zt1); */ /* zt6 -= dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = 
(__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+l1)] = zt4; */ /* bxyz[1+4*(j+l1)] = zt5; */ /* bxyz[2+4*(j+l1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+l1)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); /* exyz[4*(j+k1+l1)] = zero; */ /* exyz[1+4*(j+k1+l1)] = zero; */ /* exyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+ll]); /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+lj)]) + crealf(exyz[2+4*(j+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+lj)]) + crealf(exyz[1+4*(j+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+lj)]) + crealf(exyz[4*(j+lj)])*_Complex_I; zt4 = bxyz[4*(j+lj)] + dth*(dkz*zt2); zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - 
afdt*cu[4*(j+lj)]; zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+4*(j+lj)]; zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+lj)] = zt7; exyz[1+4*(j+lj)] = zt8; exyz[2+4*(j+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[4*(j+lj)] = zt4; bxyz[1+4*(j+lj)] = zt5; bxyz[2+4*(j+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+k1+lj)] = zero; bxyz[1+4*(j+k1+lj)] = zero; bxyz[2+4*(j+k1+lj)] = zero; exyz[4*(j+k1+lj)] = zero; exyz[1+4*(j+k1+lj)] = zero; exyz[2+4*(j+k1+lj)] = zero; /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+l1)]) + crealf(exyz[2+4*(j+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+l1)]) + crealf(exyz[1+4*(j+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+l1)]) + crealf(exyz[4*(j+l1)])*_Complex_I; zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2); zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)]; zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+4*(j+l1)]; zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+l1)] = zt7; exyz[1+4*(j+l1)] = zt8; exyz[2+4*(j+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); 
ws += (double) at1; zt4 -= dth*(dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[4*(j+l1)] = zt4; bxyz[1+4*(j+l1)] = zt5; bxyz[2+4*(j+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; exyz[4*(j+k1+l1)] = zero; exyz[1+4*(j+k1+l1)] = zero; exyz[2+4*(j+k1+l1)] = zero; } /* mode numbers kx = 0, nx/2 */ afdt = adt*cimagf(ffc[ll]); /* update magnetic field half time step */ zt2 = -cimagf(exyz[1+4*(lj)]) + crealf(exyz[1+4*(lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(lj)]) + crealf(exyz[4*(lj)])*_Complex_I; zt4 = bxyz[4*lj] + dth*(dkz*zt2); zt5 = bxyz[1+4*lj] - dth*(dkz*zt3); /* update electric field whole time step */ zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*lj] - cdt*(dkz*zt2) - afdt*cu[4*lj]; zt8 = exyz[1+4*lj] + cdt*(dkz*zt3) - afdt*cu[1+4*lj]; /* update magnetic field half time step and store electric field */ zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*lj] = zt7; exyz[1+4*lj] = zt8; exyz[2+4*lj] = zero; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)); ws += (double) at1; zt4 += dth*(dkz*zt2); zt5 -= dth*(dkz*zt3); bxyz[4*lj] = zt4; bxyz[1+4*lj] = zt5; bxyz[2+4*lj] = zero; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)); wp += (double) at1; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; exyz[4*(k1+lj)] = zero; exyz[1+4*(k1+lj)] = zero; exyz[2+4*(k1+lj)] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; exyz[4*l1] = zero; exyz[1+4*l1] = zero; exyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; exyz[2+4*(k1+l1)]= zero; } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; for (k = 1; k < nyh; k++) { /* dky = dny*(float) k; */ v_it = _mm512_set1_epi32(k); v_dky = 
_mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* afdt = adt*cimagf(ffc[j+kk]); */ v_afdt = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step, ky > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+kj)]) */ /* + crealf(exyz[2+4*(j+kj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+kj)]) */ /* + crealf(exyz[1+4*(j+kj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+kj)]) */ /* + crealf(exyz[4*(j+kj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1); */ /* zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1); */ /* zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = 
(__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)]; */ /* zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)]; */ /* zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3) */ /* - afdt*cu[2+4*(j+kj)]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+kj)] = zt7; */ /* exyz[1+4*(j+kj)] = zt8; */ /* exyz[2+4*(j+kj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+kj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+kj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = 
_mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt4 -= dth*(dky*zt1); */ /* zt5 += dth*(dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+kj)] = zt4; */ /* bxyz[1+4*(j+kj)] = zt5; */ /* bxyz[2+4*(j+kj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+kj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* update magnetic field half time step, ky < 0 */ /* zt1 = -cimagf(exyz[2+4*(j+k1)]) */ /* + crealf(exyz[2+4*(j+k1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+k1)]) */ /* + crealf(exyz[1+4*(j+k1)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+k1)]) */ /* + crealf(exyz[4*(j+k1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336),v_zero, v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771),v_zero, v_dk2); /* zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1); */ /* zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1); */ /* zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* 
update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)]; */ /* zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)]; */ /* zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3) */ /* - afdt*cu[2+4*(j+k1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+k1)] = zt7; */ /* exyz[1+4*(j+k1)] = zt8; */ /* exyz[2+4*(j+k1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+k1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+k1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 += dth*(dky*zt1); */ /* zt5 += dth*(dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = 
(__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+k1)] = zt4; */ /* bxyz[1+4*(j+k1)] = zt5; */ /* bxyz[2+4*(j+k1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+k1)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+kj+l1)] = zero; */ /* bxyz[1+4*(j+kj+l1)] = zero; */ /* bxyz[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zero); /* exyz[4*(j+kj+l1)] = zero; */ /* exyz[1+4*(j+kj+l1)] = zero; */ /* exyz[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); /* exyz[4*(j+k1+l1)] = zero; */ /* exyz[1+4*(j+k1+l1)] = zero; */ /* exyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+kk]); /* update magnetic field half time step, ky > 0 */ zt1 = -cimagf(exyz[2+4*(j+kj)]) + crealf(exyz[2+4*(j+kj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+kj)]) + crealf(exyz[1+4*(j+kj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+kj)]) + 
crealf(exyz[4*(j+kj)])*_Complex_I; zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1); zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1); zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)]; zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)]; zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+4*(j+kj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+kj)] = zt7; exyz[1+4*(j+kj)] = zt8; exyz[2+4*(j+kj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[4*(j+kj)] = zt4; bxyz[1+4*(j+kj)] = zt5; bxyz[2+4*(j+kj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; /* update magnetic field half time step, ky < 0 */ zt1 = -cimagf(exyz[2+4*(j+k1)]) + crealf(exyz[2+4*(j+k1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+k1)]) + crealf(exyz[1+4*(j+k1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+k1)]) + crealf(exyz[4*(j+k1)])*_Complex_I; zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1); zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1); zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)]; zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)]; zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+4*(j+k1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = 
-cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+k1)] = zt7; exyz[1+4*(j+k1)] = zt8; exyz[2+4*(j+k1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[4*(j+k1)] = zt4; bxyz[1+4*(j+k1)] = zt5; bxyz[2+4*(j+k1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+kj+l1)] = zero; bxyz[1+4*(j+kj+l1)] = zero; bxyz[2+4*(j+kj+l1)] = zero; exyz[4*(j+kj+l1)] = zero; exyz[1+4*(j+kj+l1)] = zero; exyz[2+4*(j+kj+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; exyz[4*(j+k1+l1)] = zero; exyz[1+4*(j+k1+l1)] = zero; exyz[2+4*(j+k1+l1)] = zero; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; afdt = adt*cimagf(ffc[kk]); /* update magnetic field half time step */ zt1 = -cimagf(exyz[2+4*(kj)]) + crealf(exyz[2+4*(kj)])*_Complex_I; zt3 = -cimagf(exyz[4*(kj)]) + crealf(exyz[4*(kj)])*_Complex_I; zt4 = bxyz[4*kj] - dth*(dky*zt1); zt6 = bxyz[2+4*kj] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*kj] + cdt*(dky*zt1) - afdt*cu[4*kj]; zt9 = exyz[2+4*kj] - cdt*(dky*zt3) - afdt*cu[2+4*kj]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*kj] = zt7; exyz[1+4*kj] = zero; exyz[2+4*kj] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1); zt6 += dth*(dky*zt3); bxyz[4*kj] = zt4; bxyz[1+4*kj] = zero; bxyz[2+4*kj] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; exyz[4*k1] = zero; exyz[1+4*k1] = zero; 
exyz[2+4*k1] = zero; bxyz[4*(kj+l1)] = zero; bxyz[1+4*(kj+l1)] = zero; bxyz[2+4*(kj+l1)]= zero; exyz[4*(kj+l1)] = zero; exyz[1+4*(kj+l1)] = zero; exyz[2+4*(kj+l1)] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; exyz[2+4*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx); /* afdt = adt*cimagf(ffc[j]); */ v_afdt = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step */ /* zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*j]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt5 = bxyz[1+4*j] + dth*(dkx*zt1); */ /* zt6 = bxyz[2+4*j] - dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = 
_mm512_load_ps((float *)&bxyz[4*j]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j]; */ /* zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*j]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),v_afdt, v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*j] = zero; */ /* exyz[1+4*j] = zt8; */ /* exyz[2+4*j] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),v_zero); _mm512_mask_store_ps((float *)&exyz[4*j], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*j],v_zt4); } /* ws += anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt5 += dth*(dkx*zt1); */ /* zt6 -= dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*j] = zero; */ /* bxyz[1+4*j] = zt5; */ /* bxyz[2+4*j] 
= zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),v_zero); _mm512_mask_store_ps((float *)&bxyz[4*j], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*j],v_zt5); } /* wp += anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1)] = zero; */ /* bxyz[1+4*(j+k1)] = zero; */ /* bxyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zero); /* exyz[4*(j+k1)] = zero; */ /* exyz[1+4*(j+k1)] = zero; */ /* exyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1)],v_zero); /* bxyz[4*(j+l1)] = zero; */ /* bxyz[1+4*(j+l1)] = zero; */ /* bxyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero); /* exyz[4*(j+l1)] = zero; */ /* exyz[1+4*(j+l1)] = zero; */ /* exyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); /* exyz[4*(j+k1+l1)] = zero; */ /* exyz[1+4*(j+k1+l1)] = zero; */ /* exyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j]); /* update magnetic field half time step */ zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I; zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I; zt5 = bxyz[1+4*j] + dth*(dkx*zt1); zt6 = bxyz[2+4*j] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + 
crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j]; zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; exyz[4*j] = zero; exyz[1+4*j] = zt8; exyz[2+4*j] = zt9; at1 = anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[4*j] = zero; bxyz[1+4*j] = zt5; bxyz[2+4*j] = zt6; at1 = anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+k1)] = zero; bxyz[1+4*(j+k1)] = zero; bxyz[2+4*(j+k1)] = zero; exyz[4*(j+k1)] = zero; exyz[1+4*(j+k1)] = zero; exyz[2+4*(j+k1)] = zero; bxyz[4*(j+l1)] = zero; bxyz[1+4*(j+l1)] = zero; bxyz[2+4*(j+l1)] = zero; exyz[4*(j+l1)] = zero; exyz[1+4*(j+l1)] = zero; exyz[2+4*(j+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; exyz[4*(j+k1+l1)] = zero; exyz[1+4*(j+k1+l1)] = zero; exyz[2+4*(j+k1+l1)] = zero; } bxyz[0] = zero; bxyz[1] = zero; bxyz[2] = zero; exyz[0] = zero; exyz[1] = zero; exyz[2]= zero; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; exyz[4*k1] = zero; exyz[1+4*k1] = zero; exyz[2+4*k1] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; exyz[4*l1] = zero; exyz[1+4*l1] = zero; exyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; exyz[2+4*(k1+l1)] = zero; d0 = _mm512_reduce_add_pd(v_ws); *wf = (ws + d0)*((float) nx)*((float) ny)*((float) nz); d0 = _mm512_reduce_add_pd(v_wp); *wm = c2*(wp + d0)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void ckncemfield3(float complex fxyz[], float complex exyz[], float complex ffc[], int isign, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, 
int nzhd) { /* this subroutine either adds complex vector fields if isign > 0 or copies complex vector fields if isign < 0 includes additional smoothing requires KNC, fxyz, exyz, ffc need to be 64 byte aligned nxhd needs to be a multiple of 8 nxvh needs to be a multiple of 2 fxyz, exyz needs to have 4 components local data */ int j, k, l, nxh, nyh, nzh, nxhs, itn, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float at1; __m512 v_at1, v_zero, v_zt1, v_zt2; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 2*(nxh/2); itn = 1 > nxhs ? 1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; v_zero = _mm512_setzero_ps(); /* add the fields */ if (isign > 0) { for (l = 1; l < nzh; l++) { ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1; */ /* fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1; */ /* fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+lj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */ /* fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]); v_zt2 = 
_mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk+ll]); fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1; fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1; fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1; fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, 
_mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1; */ /* fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1; */ /* fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+lj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */ /* fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+ll]); fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1; fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1; fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1; fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; 
fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } l1 = nxvyh*nzh; for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1; */ /* fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1; */ /* fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2); /* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); 
_mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk]); fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1; fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1; fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1; fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*j] += exyz[4*j]*at1; */ /* fxyz[1+4*j] += exyz[1+4*j]*at1; */ /* fxyz[2+4*j] += exyz[2+4*j]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*j]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*j]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*j],v_zt2); /* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float 
*)&exyz[4*(j+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j]); fxyz[4*j] += exyz[4*j]*at1; fxyz[1+4*j] += exyz[1+4*j]*at1; fxyz[2+4*j] += exyz[2+4*j]*at1; fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } /* copy the fields */ else if (isign < 0) { for (l = 1; l < nzh; l++) { ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1; */ /* fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1; */ /* fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1; */ v_zt1 = 
_mm512_load_ps((float *)&exyz[4*(j+kj+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */ /* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk+ll]); fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1; fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1; fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1; fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_at1 = 
_mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1; */ /* fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1; */ /* fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */ /* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+ll]); fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1; fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1; fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1; fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } l1 = nxvyh*nzh; for 
(k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1; */ /* fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1; */ /* fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2); /* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk]); fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1; fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1; fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1; fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] = 
exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*j] = exyz[4*j]*at1; */ /* fxyz[1+4*j] = exyz[1+4*j]*at1; */ /* fxyz[2+4*j] = exyz[2+4*j]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*j]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*j],v_zt2); /* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < 
nxh; j++) { at1 = cimagf(ffc[j]); fxyz[4*j] = exyz[4*j]*at1; fxyz[1+4*j] = exyz[1+4*j]*at1; fxyz[2+4*j] = exyz[2+4*j]*at1; fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } return; } /*--------------------------------------------------------------------*/ void ckncfft3rmxy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nzi, int nzp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the x-y part of a three dimensional real to complex fast fourier transform and its inverse, for a subset of z, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, an inverse fourier transform in x and y is performed f[i][m][n] = (1/nx*ny*nz)*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)* exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform in x and y is performed f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nzi = initial z index used nzp = number of z indices used nxhd = first dimension of f nyd,nzd = second and third dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0] = real, imaginary part of 
mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. requires KNC, f needs to be 64 byte aligned nxhd need to be a multiple of 8 written by viktor k. decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh; int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd; int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff; int nss, nxhs, nxhhs, itn; float ani; float complex t1, t2, t3; __m512i v_j, v_kmr, v_m, v_n, v_it; __m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani; v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0); if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nzt = nzi + nzp - 1; nxhyd = nxhd*nyd; nxhs = 8*(nxh/8); nxhhs = 8*(nxhh/8); itn = 1 > nxhhs ? 
1 : nxhhs; v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0); v_n = _mm512_set_epi32(1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14); v_zero = _mm512_setzero_ps(); v_t1 = _mm512_setzero_ps(); v_t2 = _mm512_setzero_ps(); v_t3 = _mm512_setzero_ps(); v_t4 = _mm512_setzero_ps(); if (isign > 0) goto L180; /* inverse fourier transform */ nrxb = nxhyz/nxh; nrx = nxyz/nxh; nryb = nxhyz/ny; nry = nxyz/ny; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3, \ v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } } /* first transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 8*(ns/8); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (i = 0; i < ny; i++) { joff = nxhd*i + nn; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nss; j+=8) { /* t1 = sct[kmr*j]; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4); /* t2 = t1*f[j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[j+k2+joff] = f[j+k1+joff] - t2; */ v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[j+k2+joff],v_t4); /* f[j+k1+joff] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[j+k1+joff],v_t4); } /* loop over 
remaining elements */ for (j = nss; j < ns; j++) { t1 = sct[kmr*j]; t2 = t1*f[j+k2+joff]; f[j+k2+joff] = f[j+k1+joff] - t2; f[j+k1+joff] += t2; } } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxyz/nx; ani = 0.5/(((float) nx)*((float) ny)*((float) nz)); v_ani = _mm512_set1_ps(ani); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd*k + nn; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhhs; j+=8) { /* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845), v_zero,v_t3); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* t2 = conjf(f[nxh-j+joff]); */ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[nxh-j+joff-7]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[nxh-j+joff+1]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[j+joff] = ani*(t1 + t2); */ v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2)); /* f[nxh-j+joff] = ani*conjf(t1 - t2); */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); v_t4 = _mm512_mul_ps(v_ani,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float 
*)&f[j+joff], _mm512_int2mask(65532),v_t3); _mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7], _mm512_int2mask(16383),v_t4); _mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1], _mm512_int2mask(16383),v_t4); } else { _mm512_store_ps((float *)&f[j+joff],v_t3); _mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4); _mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = ani*(t1 + t2); f[nxh-j+joff] = ani*conjf(t1 - t2); } } ani = 2.0*ani; for (k = 0; k < ny; k++) { joff = nxhd*k + nn; f[nxhh+joff] = ani*conjf(f[nxhh+joff]); f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1 + nn; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t1 = f[i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[i+k1]); /* f[i+k1] = f[i+joff]; */ v_t2 = _mm512_load_ps((float *)&f[i+joff]); _mm512_store_ps((float *)&f[i+k1],v_t2); /* f[i+joff] = t1; */ _mm512_store_ps((float *)&f[i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[i+k1]; f[i+k1] = f[i+joff]; f[i+joff] = t1; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1) + nn; j2 = nxhd*(j + k2) + nn; t1 = sct[kmr*j]; v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t2 = t1*f[i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = 
(__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[i+j2] = f[i+j1] - t2; */ v_t3 = _mm512_load_ps((float *)&f[i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j2],v_t4); /* f[i+j1] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } } ns = ns2; } /* unscramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd*k; k1 = nxhd*ny - joff + nn; joff += nn; t1 = f[k1]; f[k1] = 0.5*(cimagf(f[joff] + t1) + crealf(f[joff] - t1)*_Complex_I); f[joff] = 0.5*(crealf(f[joff] + t1) + cimagf(f[joff] - t1)*_Complex_I); } } return; /* forward fourier transform */ L180: nryb = nxhyz/ny; nry = nxyz/ny; nrxb = nxhyz/nxh; nrx = nxyz/nxh; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3,v_it, \ v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* scramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd*k; k1 = nxhd*ny - joff + nn; joff += nn; t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I; f[k1] = conjf(f[joff] - t1); f[joff] += t1; } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1 + nn; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t1 = f[i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[i+k1]); /* f[i+k1] = f[i+joff]; */ v_t2 = _mm512_load_ps((float *)&f[i+joff]); _mm512_store_ps((float *)&f[i+k1],v_t2); /* f[i+joff] = t1; */ _mm512_store_ps((float *)&f[i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[i+k1]; f[i+k1] = f[i+joff]; f[i+joff] = t1; } } } /* then transform 
in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1) + nn; j2 = nxhd*(j + k2) + nn; t1 = conjf(sct[kmr*j]); v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t2 = t1*f[i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[i+j2] = f[i+j1] - t2; */ v_t3 = _mm512_load_ps((float *)&f[i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j2],v_t4); /* f[i+j1] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } } ns = ns2; } /* scramble coefficients */ kmr = nxyz/nx; v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd*k + nn; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhhs; j+=8) { /* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* t2 = conjf(f[nxh-j+joff]); */ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[nxh-j+joff-7]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[nxh-j+joff+1]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[j+joff] + t2; */ v_t4 = _mm512_load_ps((float 
*)&f[j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[j+joff] = t1 + t2; */ v_t3 = _mm512_add_ps(v_t1,v_t2); /* f[nxh-j+joff] = conjf(t1 - t2); */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[j+joff], _mm512_int2mask(65532),v_t3); _mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7], _mm512_int2mask(16383),v_t4); _mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1], _mm512_int2mask(16383),v_t4); } else { _mm512_store_ps((float *)&f[j+joff],v_t3); _mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4); _mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = t1 + t2; f[nxh-j+joff] = conjf(t1 - t2); } } for (k = 0; k < ny; k++) { joff = nxhd*k + nn; f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]); f[joff] = (crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } } /* finally transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 8*(ns/8); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = ns2*k; 
/* NOTE(review): this span continues the x-butterfly of the preceding
   (partially visible) routine, then defines ckncfft3rmz.  float complex data
   is stored as interleaved (re,im) float pairs inside the 512-bit KNC
   vectors: mask 21845 = 0x5555 selects the 8 real lanes and 43690 = 0xAAAA
   the 8 imaginary lanes; _mm512_shuffle_epi32 with 177 = 0xB1 swaps re/im in
   each pair while 160/245 broadcast the real/imaginary halves -- together the
   standard vectorized complex-multiply sequence.  Inferred from the scalar
   equivalents kept in the adjacent comments; confirm against the Intel KNC
   intrinsics reference. */
k2 = k1 + ns; for (i = 0; i < ny; i++) { joff = nxhd*i + nn; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nss; j+=8) { /* t1 = conjf(sct[kmr*j]); */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690), v_zero,v_t1); /* t2 = t1*f[j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[j+k2+joff] = f[j+k1+joff] - t2; */ v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[j+k2+joff],v_t4); /* f[j+k1+joff] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[j+k1+joff],v_t4); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = conjf(sct[kmr*j]); t2 = t1*f[j+k2+joff]; f[j+k2+joff] = f[j+k1+joff] - t2; f[j+k1+joff] += t2; } } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void ckncfft3rmz(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nyi, int nyp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the z part of a three dimensional real to complex fast fourier transform and its inverse, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1,
an inverse fourier transform in z is performed f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz)) if isign = 1, a forward fourier transform in z is performed f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = first dimension of f nyd,nzd = second and third dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. requires KNC, f needs to be 64 byte aligned nxhd need to be a multiple of 8 written by viktor k. decyk, ucla local data */
/* NOTE(review): nss is declared below but never used in this routine;
   nxhs = 8*(nxh/8) is the largest multiple of 8 <= nxh and bounds the
   vectorized loops, with scalar cleanup loops covering nxhs..nxh-1. */
int indx1, ndx1yz, nx, nxh, ny, nyh; int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff; int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1; int nss, nxhs; float complex t1, t2; __m512 v_zero, v_t1, v_t2, v_t3, v_t4; if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nzh = nz/2; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ?
nxyz : nz; nxhyz = 1L<<ndx1yz; nyt = nyi + nyp - 1; nxhyd = nxhd*nyd; nxhs = 8*(nxh/8); v_zero = _mm512_setzero_ps(); v_t1 = _mm512_setzero_ps(); v_t2 = _mm512_setzero_ps(); v_t3 = _mm512_setzero_ps(); v_t4 = _mm512_setzero_ps(); if (isign > 0) goto L90; /* inverse fourier transform */ nrzb = nxhyz/nz; nrz = nxyz/nz; #pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4) for (n = nyi-1; n < nyt; n++) { ioff = nxhd*n; /* bit-reverse array elements in z */ for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff + l1; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t1 = f[i+i1]; */ v_t1 = _mm512_load_ps((float *)&f[i+i1]); /* f[i+i1] = f[i+i0]; */ v_t2 = _mm512_load_ps((float *)&f[i+i0]); _mm512_store_ps((float *)&f[i+i1],v_t2); /* f[i+i0] = t1; */ _mm512_store_ps((float *)&f[i+i0],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[i+i1]; f[i+i1] = f[i+i0]; f[i+i0] = t1; } } } /* finally transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = sct[kmr*j]; v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); i0 = ioff + j1; i1 = ioff + j2; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t2 = t1*f[i+i1]; */ v_t2 = _mm512_load_ps((float *)&f[i+i1]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[i+i1] = f[i+i0] - t2; */ v_t3 = _mm512_load_ps((float *)&f[i+i0]); v_t4 =
_mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+i1],v_t4); /* f[i+i0] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+i0],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[i+i1]; f[i+i1] = f[i+i0] - t2; f[i+i0] += t2; } } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if (nyi==1) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; t1 = f[l1]; f[l1] = 0.5*(cimagf(f[ll] + t1) + crealf(f[ll] - t1)*_Complex_I); f[ll] = 0.5*(crealf(f[ll] + t1) + cimagf(f[ll] - t1)*_Complex_I); } } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; i1 = nxhd*nyh; i0 = i1 + ll; i1 += l1; t1 = f[i1]; f[i1] = 0.5*(cimagf(f[i0] + t1) + crealf(f[i0] - t1)*_Complex_I); f[i0] = 0.5*(crealf(f[i0] + t1) + cimagf(f[i0] - t1)*_Complex_I); } } return; /* forward fourier transform */ L90: nrzb = nxhyz/nz; nrz = nxyz/nz; /* scramble modes kx = 0, nx/2 */ if (nyi==1) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I; f[l1] = conjf(f[ll] - t1); f[ll] += t1; } } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; i1 = nxhd*nyh; i0 = i1 + ll; i1 += l1; t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I; f[i1] = conjf(f[i0] - t1); f[i0] += t1; } } /* bit-reverse array elements in z */ #pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4) for (n = nyi-1; n < nyt; n++) { ioff = nxhd*n; for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff + l1; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t1 = f[i+i1]; */ v_t1 = _mm512_load_ps((float *)&f[i+i1]); /* f[i+i1] = f[i+i0]; */ v_t2 = _mm512_load_ps((float *)&f[i+i0]); _mm512_store_ps((float *)&f[i+i1],v_t2); /* f[i+i0] = t1; */ _mm512_store_ps((float
*)&f[i+i0],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[i+i1]; f[i+i1] = f[i+i0]; f[i+i0] = t1; } } } /* first transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = conjf(sct[kmr*j]); v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); i0 = ioff + j1; i1 = ioff + j2; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t2 = t1*f[i+i1]; */ v_t2 = _mm512_load_ps((float *)&f[i+i1]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[i+i1] = f[i+i0] - t2; */ v_t3 = _mm512_load_ps((float *)&f[i+i0]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+i1],v_t4); /* f[i+i0] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+i0],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[i+i1]; f[i+i1] = f[i+i0] - t2; f[i+i0] += t2; } } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void ckncfft3rm3xy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nzi, int nzp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the x-y part of 3 three dimensional complex to real fast fourier transforms and their inverses, for a subset of z, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, three inverse fourier transforms in x and y are performed f[i][m][n][0:2] = (1/nx*ny*nz)*sum(f[i][k][j][0:2]* exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, three forward fourier transforms in x and y are performed f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nzi = initial z index used nzp = number of z indices used nxhd = second dimension of f nyd,nzd = third and fourth dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j][0:2] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. requires KNC, f needs to be 64 byte aligned nxhd need to be a multiple of 2 f needs to have 4 components written by viktor k.
decyk, ucla local data */
/* NOTE(review): per the doc comment above, f carries 4 float complex
   components per grid point (3 used + 1 pad), hence the nxhd4 = 4*nxhd
   strides and the 2-complex (16-float) vector blocks.  Mask 63 = 0x3F
   presumably covers the 6 floats of the 3 active components, with 21/42
   picking their real/imaginary lanes, and the v_l permutations implement the
   component swaps shown in the scalar-equivalent comments -- confirm against
   the KNC intrinsics reference. */
int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh; int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd4, nxhyd; int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff; int nss, nxhs, nxhhs, itn; float at1, at2, ani; float complex t1, t2, t3, t4; __m512i v_j, v_kmr, v_m, v_n, v_l, v_it; __m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani, v_half; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nzt = nzi + nzp - 1; nxhd4 = 4*nxhd; nxhyd = nxhd4*nyd; nxhs = 2*(nxh/2); nxhhs = 2*(nxhh/2); itn = 1 > nxhhs ? 1 : nxhhs; v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0); v_n = _mm512_set_epi32(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8); v_zero = _mm512_setzero_ps(); v_t1 = _mm512_setzero_ps(); v_t2 = _mm512_setzero_ps(); v_t3 = _mm512_setzero_ps(); v_t4 = _mm512_setzero_ps(); v_half = _mm512_set1_ps(0.5f); if (isign > 0) goto L230; /* inverse fourier transform */ nrxb = nxhyz/nxh; nrx = nxyz/nxh; nryb = nxhyz/ny; nry = nxyz/ny; v_l = _mm512_set_epi32(15,11,14,10,13,9,12,8,7,3,6,2,5,1,4,0); #pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
ani,t1,t2,t3,t4,v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* swap complex components */ for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(f[2+4*j+joff]); */ /* at2 = crealf(f[2+4*j+joff]); */ /* f[2+4*j+joff] = crealf(f[1+4*j+joff]) */ /* + crealf(f[3+4*j+joff])*_Complex_I; */ /* f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; */ /* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */ v_t1 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 =
(__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1); _mm512_store_ps((float *)&f[4*j+joff],v_t1); } /* loop over remaining elements */ for (j = nxhs; j < nxh; j++) { at1 = cimagf(f[2+4*j+joff]); at2 = crealf(f[2+4*j+joff]); f[2+4*j+joff] = crealf(f[1+4*j+joff]) + crealf(f[3+4*j+joff])*_Complex_I; f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; } } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm512_mask_loadunpacklo_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff]); v_t1 = _mm512_mask_loadunpackhi_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff+8]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm512_mask_loadunpacklo_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff]); v_t2 = _mm512_mask_loadunpackhi_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff+8]); _mm512_mask_packstorelo_ps((float *)&f[4*j1+joff], _mm512_int2mask(255),v_t2); _mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8], _mm512_int2mask(255),v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm512_mask_packstorelo_ps((float *)&f[4*j+joff], _mm512_int2mask(255),v_t1); _mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8], _mm512_int2mask(255),v_t1); } } } /* first transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 2*(ns/2); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = sct[kmr*j]; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float
*)sct,4); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ /* t4 = t1*f[2+4*j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k2+joff],v_t4); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k1+joff],v_t4); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = sct[kmr*j]; t2 = t1*f[4*j+k2+joff]; t3 = t1*f[1+4*j+k2+joff]; t4 = t1*f[2+4*j+k2+joff]; f[4*j+k2+joff] = f[4*j+k1+joff] - t2; f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; f[4*j+k1+joff] += t2; f[1+4*j+k1+joff] += t3; f[2+4*j+k1+joff] += t4; } } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxyz/nx; ani = 0.5/(((float) nx)*((float) ny)*((float) nz)); v_ani = _mm512_set1_ps(ani); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845), v_zero,v_t3); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* for (jj = 0; jj < 3; jj++) { */ /* t2 = conjf(f[jj+4*(nxh-j)+joff]);
*/ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff+8]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[jj+4*j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[jj+4*j+joff] = ani*(t1 + t2); */ v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2)); /* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */ /* } */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); v_t4 = _mm512_mul_ps(v_ani,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[4*j+joff], _mm512_int2mask(65280),v_t3); _mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff], _mm512_int2mask(255),v_t4); _mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8], _mm512_int2mask(255),v_t4); } else { _mm512_store_ps((float *)&f[4*j+joff],v_t3); _mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4); _mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+4*(nxh-j)+joff]); t1 = f[jj+4*j+joff] + t2; t2 = (f[jj+4*j+joff] - t2)*t3; f[jj+4*j+joff] = ani*(t1 + t2); f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); } } } /* ani = 2.0*ani; */ v_ani = _mm512_add_ps(v_ani,v_ani); for (k = 0;
k < ny; k++) { joff = nxhd4*k + nn; /* for (jj = 0; jj < 3; jj++) { */ /* f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]); */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[4*nxhh+joff]); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero, v_t1); v_t1 = _mm512_mul_ps(v_ani,v_t1); _mm512_mask_store_ps((float *)&f[4*nxhh+joff], _mm512_int2mask(63),v_t1); /* f[jj+joff] = ani*((crealf(f[jj+joff]) */ /* + cimagf(f[jj+joff])) */ /* + (crealf(f[jj+joff]) */ /* - cimagf(f[jj+joff]))*_Complex_I); */ /* } */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2); v_t3 = _mm512_mul_ps(v_ani,v_t3); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t3); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd4*k1 + nn; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+k1]; */ /* t2 = f[1+4*i+k1]; */ /* t3 = f[2+4*i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+k1]); /* f[4*i+k1] = f[4*i+joff]; */ /* f[1+4*i+k1] = f[1+4*i+joff]; */ /* f[2+4*i+k1] = f[2+4*i+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+joff]); _mm512_store_ps((float *)&f[4*i+k1],v_t2); /* f[4*i+joff] = t1; */ /* f[1+4*i+joff] = t2; */ /* f[2+4*i+joff] = t3; */ _mm512_store_ps((float *)&f[4*i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+k1]; t2 = f[1+4*i+k1]; t3 = f[2+4*i+k1]; f[4*i+k1] = f[4*i+joff]; f[1+4*i+k1] = f[1+4*i+joff]; f[2+4*i+k1] = f[2+4*i+joff]; f[4*i+joff] = t1; f[1+4*i+joff] = t2; f[2+4*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 =
nxhd4*(j + k1) + nn; j2 = nxhd4*(j + k2) + nn; t1 = sct[kmr*j]; v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = t1*f[4*i+j2]; */ /* t3 = t1*f[1+4*i+j2]; */ /* t4 = t1*f[2+4*i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+j2] = f[4*i+j1] - t2; */ /* f[1+4*i+j2] = f[1+4*i+j1] - t3; */ /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j2],v_t4); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ /* f[2+4*i+j1] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+j2]; t3 = t1*f[1+4*i+j2]; t4 = t1*f[2+4*i+j2]; f[4*i+j2] = f[4*i+j1] - t2; f[1+4*i+j2] = f[1+4*i+j1] - t3; f[2+4*i+j2] = f[2+4*i+j1] - t4; f[4*i+j1] += t2; f[1+4*i+j1] += t3; f[2+4*i+j1] += t4; } } } ns = ns2; } /* unscramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd4*k; k1 = nxhd4*ny - joff + nn; joff += nn; /* for (jj = 0; jj < 3; jj++) { */ /* t1 = f[jj+k1]; */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[k1]); /* f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1) */ /* + crealf(f[jj+joff] - t1)*_Complex_I); */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(42),v_t2,v_t1); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21),v_t2,v_t1); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); v_t3 = _mm512_mul_ps(v_half,v_t3); _mm512_mask_store_ps((float
*)&f[k1],_mm512_int2mask(63),v_t3); /* f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1) */ /* + cimagf(f[jj+joff] - t1)*_Complex_I); */ /* } */ v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t2,v_t1); v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(21),v_t2,v_t1); v_t2 = _mm512_mul_ps(v_half,v_t2); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),v_t2); } } return; /* forward fourier transform */ L230: nryb = nxhyz/ny; nry = nxyz/ny; nrxb = nxhyz/nxh; nrx = nxyz/nxh; v_l = _mm512_set_epi32(15,13,11,9,14,12,10,8,7,5,3,1,6,4,2,0); #pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \
t1,t2,t3,t4,v_it,v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* scramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd4*k; k1 = nxhd4*ny - joff + nn; joff += nn; /* for (jj = 0; jj < 3; jj++) { */ /* t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[k1]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,177); /* f[jj+k1] = conjf(f[jj+joff] - t1); */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(63),v_t2,v_t1); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(42), v_zero,v_t3); _mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3); /* f[jj+joff] += t1; */ /* } */ v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(63),v_t2,v_t1); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t2); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd4*k1 + nn; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+k1]; */ /* t2 = f[1+4*i+k1]; */ /* t3 = f[2+4*i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+k1]); /* f[4*i+k1] = f[4*i+joff]; */ /* f[1+4*i+k1] = f[1+4*i+joff]; */ /* f[2+4*i+k1] = f[2+4*i+joff]; */ v_t2 =
_mm512_load_ps((float *)&f[4*i+joff]); _mm512_store_ps((float *)&f[4*i+k1],v_t2); /* f[4*i+joff] = t1; */ /* f[1+4*i+joff] = t2; */ /* f[2+4*i+joff] = t3; */ _mm512_store_ps((float *)&f[4*i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+k1]; t2 = f[1+4*i+k1]; t3 = f[2+4*i+k1]; f[4*i+k1] = f[4*i+joff]; f[1+4*i+k1] = f[1+4*i+joff]; f[2+4*i+k1] = f[2+4*i+joff]; f[4*i+joff] = t1; f[1+4*i+joff] = t2; f[2+4*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd4*(j + k1) + nn; j2 = nxhd4*(j + k2) + nn; t1 = conjf(sct[kmr*j]); v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = t1*f[4*i+j2]; */ /* t3 = t1*f[1+4*i+j2]; */ /* t4 = t1*f[2+4*i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+j2] = f[4*i+j1] - t2; */ /* f[1+4*i+j2] = f[1+4*i+j1] - t3; */ /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j2],v_t4); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ /* f[2+4*i+j1] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+j2]; t3 = t1*f[1+4*i+j2]; t4 = t1*f[2+4*i+j2]; f[4*i+j2] = f[4*i+j1] - t2; f[1+4*i+j2] = f[1+4*i+j1] - t3; f[2+4*i+j2] = f[2+4*i+j1] - t4; f[4*i+j1] += t2; f[1+4*i+j1] += t3; f[2+4*i+j1] += t4; } } } ns
= ns2; } /* scramble coefficients */ kmr = nxyz/nx; v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* for (jj = 0; jj < 3; jj++) { */ /* t2 = conjf(f[jj+4*(nxh-j)+joff]); */ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff+8]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[jj+4*j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[jj+4*j+joff] = t1 + t2; */ v_t3 = _mm512_add_ps(v_t1,v_t2); /* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */ /* } */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[4*j+joff], _mm512_int2mask(65280),v_t3); _mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff], _mm512_int2mask(255),v_t4); _mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8], _mm512_int2mask(255),v_t4); } else { _mm512_store_ps((float *)&f[4*j+joff],v_t3); _mm512_packstorelo_ps((float
*)&f[4*(nxh-j-1)+joff],v_t4); _mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+4*(nxh-j)+joff]); t1 = f[jj+4*j+joff] + t2; t2 = (f[jj+4*j+joff] - t2)*t3; f[jj+4*j+joff] = t1 + t2; f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); } } } for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* for (jj = 0; jj < 3; jj++) { */ /* f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]); */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[4*nxhh+joff]); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero, v_t1); v_t1 = _mm512_add_ps(v_t1,v_t1); _mm512_mask_store_ps((float *)&f[4*nxhh+joff], _mm512_int2mask(63),v_t1); /* f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) */ /* + (crealf(f[jj+joff]) */ /* - cimagf(f[jj+joff]))*_Complex_I; */ /* } */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t3); } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm512_mask_loadunpacklo_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff]); v_t1 = _mm512_mask_loadunpackhi_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff+8]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm512_mask_loadunpacklo_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff]); v_t2 = _mm512_mask_loadunpackhi_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff+8]); _mm512_mask_packstorelo_ps((float
*)&f[4*j1+joff], _mm512_int2mask(255),v_t2); _mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8], _mm512_int2mask(255),v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm512_mask_packstorelo_ps((float *)&f[4*j+joff], _mm512_int2mask(255),v_t1); _mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8], _mm512_int2mask(255),v_t1); } } } /* finally transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 2*(ns/2); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = conjf(sct[kmr*j]); */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690), v_zero,v_t1); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ /* t4 = t1*f[2+4*j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k2+joff],v_t4); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k1+joff],v_t4); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = conjf(sct[kmr*j]); t2 = t1*f[4*j+k2+joff]; t3 =
t1*f[1+4*j+k2+joff]; t4 = t1*f[2+4*j+k2+joff]; f[4*j+k2+joff] = f[4*j+k1+joff] - t2; f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; f[4*j+k1+joff] += t2; f[1+4*j+k1+joff] += t3; f[2+4*j+k1+joff] += t4; } } } ns = ns2; } /* swap complex components */ for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* f[3+4*j+joff] = cimagf(f[2+4*j+joff]) */ /* + cimagf(f[3+4*j+joff])*_Complex_I; */ /* at1 = crealf(f[2+4*j+joff]); */ /* f[2+4*j+joff] = cimagf(f[4*j+joff]) */ /* + cimagf(f[1+4*j+joff])*_Complex_I; */ /* at2 = crealf(f[1+4*j+joff]); */ /* f[1+4*j+joff] = at1 + 0.0*_Complex_I; */ /* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */ v_t1 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1); _mm512_store_ps((float *)&f[4*j+joff],v_t1); } /* loop over remaining elements */ for (j = nxhs; j < nxh; j++) { f[3+4*j+joff] = cimagf(f[2+4*j+joff]) + cimagf(f[3+4*j+joff])*_Complex_I; at1 = crealf(f[2+4*j+joff]); f[2+4*j+joff] = cimagf(f[4*j+joff]) + cimagf(f[1+4*j+joff])*_Complex_I; at2 = crealf(f[1+4*j+joff]); f[1+4*j+joff] = at1 + 0.0*_Complex_I; f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; } } } return; } /*--------------------------------------------------------------------*/ void ckncfft3rm3z(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nyi, int nyp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the z part of 3 three dimensional complex to real fast fourier transforms and their inverses, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx,
ny=2**indy, nz=2**indz if isign = -1, three inverse fourier transforms in z are performed f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz)) if isign = 1, three forward fourier transforms in z are performed f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = second dimension of f nyd,nzd = third and fourth dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j][0:2] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0][0:2], = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. requires KNC, f needs to be 64 byte aligned nxhd need to be a multiple of 2 f needs to have 4 components written by viktor k. decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, ny, nyh; int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd4, nxhyd, ioff; int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr; int i0, i1; int nxhs; float complex t1, t2, t3, t4; __m512 v_zero, v_t1, v_t2, v_t3, v_t4; if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? 
ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nzh = nz/2; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nyt = nyi + nyp - 1; nxhd4 = 4*nxhd; nxhyd = nxhd4*nyd; nxhs = 2*(nxh/2); v_zero = _mm512_setzero_ps(); v_t1 = _mm512_setzero_ps(); v_t2 = _mm512_setzero_ps(); v_t3 = _mm512_setzero_ps(); v_t4 = _mm512_setzero_ps(); if (isign > 0) goto L110; /* inverse fourier transform */ nrzb = nxhyz/nz; nrz = nxyz/nz; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \ t4,v_t1,v_t2,v_t3,v_t4) for (n = nyi-1; n < nyt; n++) { ioff = nxhd4*n; /* bit-reverse array elements in z */ for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff + l1; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+i1]; */ /* t2 = f[1+4*i+i1]; */ /* t3 = f[2+4*i+i1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+i1]); /* f[4*i+i1] = f[4*i+i0]; */ /* f[1+4*i+i1] = f[1+4*i+i0]; */ /* f[2+4*i+i1] = f[2+4*i+i0]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+i0]); _mm512_store_ps((float *)&f[4*i+i1],v_t2); /* f[4*i+i0] = t1; */ /* f[1+4*i+i0] = t2; */ /* f[2+4*i+i0] = t3; */ _mm512_store_ps((float *)&f[4*i+i0],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+i1]; t2 = f[1+4*i+i1]; t3 = f[2+4*i+i1]; f[4*i+i1] = f[4*i+i0]; f[1+4*i+i1] = f[1+4*i+i0]; f[2+4*i+i1] = f[2+4*i+i0]; f[4*i+i0] = t1; f[1+4*i+i0] = t2; f[2+4*i+i0] = t3; } } } /* finally transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = sct[kmr*j]; v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); i0 = ioff + j1; i1 = ioff + j2; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = 
t1*f[4*i+i1]; */ /* t3 = t1*f[1+4*i+i1]; */ /* t4 = t1*f[2+4*i+i1]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+i1]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+i1] = f[4*i+i0] - t2; */ /* f[1+4*i+i1] = f[1+4*i+i0] - t3; */ /* f[2+4*i+i1] = f[2+4*i+i0] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+i0]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+i1],v_t4); /* f[4*i+i0] += t2; */ /* f[1+4*i+i0] += t3; */ /* f[2+4*i+i0] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+i0],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+i1]; t3 = t1*f[1+4*i+i1]; t4 = t1*f[2+4*i+i1]; f[4*i+i1] = f[4*i+i0] - t2; f[1+4*i+i1] = f[1+4*i+i0] - t3; f[2+4*i+i1] = f[2+4*i+i0] - t4; f[4*i+i0] += t2; f[1+4*i+i0] += t3; f[2+4*i+i0] += t4; } } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if (nyi==1) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; for (jj = 0; jj < 3; jj++) { t1 = f[jj+l1]; f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1) + crealf(f[jj+ll] - t1)*_Complex_I); f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1) + cimagf(f[jj+ll] - t1)*_Complex_I); } } } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; i1 = nxhd4*nyh; i0 = i1 + ll; i1 += l1; for (jj = 0; jj < 3; jj++) { t1 = f[jj+i1]; f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1) + crealf(f[jj+i0] - t1)*_Complex_I); f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1) + cimagf(f[jj+i0] - t1)*_Complex_I); } } } return; /* forward fourier transform */ L110: nrzb = nxhyz/nz; nrz = nxyz/nz; /* scramble modes kx = 0, nx/2 */ if (nyi==1) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; for (jj = 0; jj < 3; jj++) 
{ t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I; f[jj+l1] = conjf(f[jj+ll] - t1); f[jj+ll] += t1; } } } if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) { for (n = 1; n < nzh; n++) { ll = nxhyd*n; l1 = nxhyd*nz - ll; i1 = nxhd4*nyh; i0 = i1 + ll; i1 += l1; for (jj = 0; jj < 3; jj++) { t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I; f[jj+i1] = conjf(f[jj+i0] - t1); f[jj+i0] += t1; } } } #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \ t4,v_t1,v_t2,v_t3,v_t4) for (n = nyi-1; n < nyt; n++) { ioff = nxhd4*n; /* bit-reverse array elements in z */ for (l = 0; l < nz; l++) { ll = nxhyd*l; l1 = (mixup[l] - 1)/nrzb; if (l < l1) { l1 = nxhyd*l1; i0 = ioff + ll; i1 = ioff + l1; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+i1]; */ /* t2 = f[1+4*i+i1]; */ /* t3 = f[2+4*i+i1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+i1]); /* f[4*i+i1] = f[4*i+i0]; */ /* f[1+4*i+i1] = f[1+4*i+i0]; */ /* f[2+4*i+i1] = f[2+4*i+i0]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+i0]); _mm512_store_ps((float *)&f[4*i+i1],v_t2); /* f[4*i+i0] = t1; */ /* f[1+4*i+i0] = t2; */ /* f[2+4*i+i0] = t3; */ _mm512_store_ps((float *)&f[4*i+i0],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+i1]; t2 = f[1+4*i+i1]; t3 = f[2+4*i+i1]; f[4*i+i1] = f[4*i+i0]; f[1+4*i+i1] = f[1+4*i+i0]; f[2+4*i+i1] = f[2+4*i+i0]; f[4*i+i0] = t1; f[1+4*i+i0] = t2; f[2+4*i+i0] = t3; } } } /* first transform in z */ ns = 1; for (l = 0; l < indz; l++) { ns2 = ns + ns; km = nzh/ns; kmr = km*nrz; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhyd*(j + k1); j2 = nxhyd*(j + k2); t1 = conjf(sct[kmr*j]); v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); i0 = ioff + j1; i1 = ioff + j2; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = t1*f[4*i+i1]; */ /* t3 = t1*f[1+4*i+i1]; */ /* t4 = t1*f[2+4*i+i1]; */ v_t2 = 
_mm512_load_ps((float *)&f[4*i+i1]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+i1] = f[4*i+i0] - t2; */ /* f[1+4*i+i1] = f[1+4*i+i0] - t3; */ /* f[2+4*i+i1] = f[2+4*i+i0] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+i0]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+i1],v_t4); /* f[4*i+i0] += t2; */ /* f[1+4*i+i0] += t3; */ /* f[2+4*i+i0] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+i0],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+i1]; t3 = t1*f[1+4*i+i1]; t4 = t1*f[2+4*i+i1]; f[4*i+i1] = f[4*i+i0] - t2; f[1+4*i+i1] = f[1+4*i+i0] - t3; f[2+4*i+i1] = f[2+4*i+i0] - t4; f[4*i+i0] += t2; f[1+4*i+i0] += t3; f[2+4*i+i0] += t4; } } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void ckncwfft3rmx(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* wrapper function for real to complex fft, with packed data */ /* local data */ int ny, nz; static int nyi = 1, nzi = 1; /* calculate range of indices */ ny = 1L<<indy; nz = 1L<<indz; /* inverse fourier transform */ if (isign < 0) { /* perform xy fft */ ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform z fft */ ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); } /* forward fourier transform */ else if (isign > 0) { /* perform z fft */ ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform xy fft */ ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd, nxhyzd,nxyzhd); 
} return; } /*--------------------------------------------------------------------*/ void ckncwfft3rm3(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* wrapper function for 3 2d real to complex ffts, with packed data */ /* local data */ int ny, nz; static int nyi = 1, nzi = 1; /* calculate range of indices */ ny = 1L<<indy; nz = 1L<<indz; /* inverse fourier transform */ if (isign < 0) { /* perform xy fft */ ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd, nzd,nxhyzd,nxyzhd); /* perform z fft */ ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); } /* forward fourier transform */ else if (isign > 0) { /* perform z fft */ ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform xy fft */ ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd, nzd,nxhyzd,nxyzhd); } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ void ckncgbppush3lt_(float *ppart, float *fxyz, float *bxyz ,int *kpic, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx, *nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1, *mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncgbppushf3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { ckncgbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,ek, *idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv, 
*mx1,*my1,*mxyz1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncgrbppush3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgrbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp, *nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1, *my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncgrbppushf3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { ckncgrbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek, *idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv, *nzv,*mx1,*my1,*mxyz1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncgppost3lt_(float *ppart, float *q, int *kpic, float *qm, int *nppmx, int *idimp, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1) { ckncgppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv, *nzv,*mx1,*my1,*mxyz1); return; } /*--------------------------------------------------------------------*/ void cknc2gppost3lt_(float *ppart, float *q, int *kpic, float *qm, int *nppmx, int *idimp, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1) { cknc2gppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv, *nzv,*mx1,*my1,*mxyz1); return; } /*--------------------------------------------------------------------*/ void ckncgjppost3lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, int *nppmx, int *idimp, int *nx, 
int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgjppost3lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*nz,*mx, *my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncgrjppost3lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgrjppost3lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*nz, *mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncpporder3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *mx1, int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) { ckncpporder3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz, *mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncpporderf3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *mx1, int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) { ckncpporderf3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1, *mz1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cknccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { cknccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void ckncacguard3l_(float *cu, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { ckncacguard3l(cu,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void 
ckncaguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { ckncaguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void ckncmpois33_(float complex *q, float complex *fxyz, int *isign, float complex *ffc, float *ax, float *ay, float *az, float *affp, float *we, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh, *nyv,*nzv,*nxhd,*nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void cknccuperp3_(float complex *cu, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv) { cknccuperp3(cu,*nx,*ny,*nz,*nxvh,*nyv,*nzv); return; } /*--------------------------------------------------------------------*/ void ckncibpois33_(float complex *cu, float complex *bxyz, float complex *ffc, float *ci, float *wm, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncibpois33(cu,bxyz,ffc,*ci,wm,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd, *nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void ckncmaxwel3_(float complex *exyz, float complex *bxyz, float complex *cu, float complex *ffc, float *ci, float *dt, float *wf, float *wm, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncmaxwel3(exyz,bxyz,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nz,*nxvh,*nyv, *nzv,*nxhd,*nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void ckncemfield3_(float complex *fxyz, float complex *exyz, float complex *ffc, int *isign, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncemfield3(fxyz,exyz,ffc,*isign,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd, *nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void 
ckncwfft3rmx_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *indz, int *nxhd, int *nyd, int *nzd, int *nxhyzd, int *nxyzhd) { ckncwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd, *nxhyzd,*nxyzhd); return; } /*--------------------------------------------------------------------*/ void ckncwfft3rm3_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *indz, int *nxhd, int *nyd, int *nzd, int *nxhyzd, int *nxyzhd) { ckncwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd, *nxhyzd,*nxyzhd); return; }
/* ===== begin file: GB_unop__identity_bool_fc64.c ===== */
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_bool_fc64) // op(A') function: GB (_unop_tran__identity_bool_fc64) // C type: bool // A type: GxB_FC64_t // cast: bool cij = (creal (aij) != 0) || (cimag (aij) != 0) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_bool_fc64) ( bool *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, 
// A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_bool_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== begin file: 3d7pt.lbpar.c ===== */
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(8*t3+Nx+4,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),128*t4+126),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp 
parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
test_loops_omp.c
/** @file class.c
 * Julien Lesgourgues, 17.04.2011
 */

/* this main calls CLASS several times in a loop, with different input
   parameters. It illustrates how the code could be interfaced with a
   parameter extraction code. */

/* JL 17.03.2016: implemented here nested openMP loops. The user chooses how
   many instances of CLASS are run in parallel (by setting the variable
   number_of_class_instances below). Each of them uses a number of thread
   such that all cores are used. */

#include "class.h"

/**
 * Run the full CLASS pipeline once for the parameters stored in *pfc and
 * write the total C_l's into cl[l] for l = 2 .. l_max.
 *
 * All module structures are initialized in dependency order, the spectra
 * are extracted, and every structure is freed again before returning, so
 * the function can be called repeatedly with different parameters.
 *
 * Returns _SUCCESS_ or _FAILURE_ (error text written into errmsg).
 */
int class(
          struct file_content *pfc,
          struct precision * ppr,
          struct background * pba,
          struct thermo * pth,
          struct perturbs * ppt,
          struct primordial * ppm,
          struct nonlinear * pnl,
          struct transfers * ptr,
          struct spectra * psp,
          struct lensing * ple,
          struct output * pop,
          int l_max,
          double ** cl,
          ErrorMsg errmsg) {

  int l;

  class_call(input_init(pfc,ppr,pba,pth,ppt,ptr,ppm,psp,pnl,ple,pop,errmsg),
             errmsg,
             errmsg);

  class_call(background_init(ppr,pba),
             pba->error_message,
             errmsg);

  class_call(thermodynamics_init(ppr,pba,pth),
             pth->error_message,
             errmsg);

  class_call(perturb_init(ppr,pba,pth,ppt),
             ppt->error_message,
             errmsg);

  class_call(primordial_init(ppr,ppt,ppm),
             ppm->error_message,
             errmsg);

  class_call(nonlinear_init(ppr,pba,pth,ppt,ppm,pnl),
             pnl->error_message,
             errmsg);

  class_call(transfer_init(ppr,pba,pth,ppt,pnl,ptr),
             ptr->error_message,
             errmsg);

  class_call(spectra_init(ppr,pba,ppt,ppm,pnl,ptr,psp),
             psp->error_message,
             errmsg);

  class_call(lensing_init(ppr,ppt,psp,pnl,ple),
             ple->error_message,
             errmsg);

  /****** write the Cl values in the input array cl[l] *******/

  for (l=2; l <= l_max; l++) {

    class_call(output_total_cl_at_l(psp,ple,pop,(double)l,cl[l]),
               psp->error_message,
               errmsg);
  }

  /****** all calculations done, now free the structures ******/

  class_call(lensing_free(ple),
             ple->error_message,
             errmsg);

  class_call(spectra_free(psp),
             psp->error_message,
             errmsg);

  class_call(transfer_free(ptr),
             ptr->error_message,
             errmsg);

  class_call(nonlinear_free(pnl),
             pnl->error_message,
             errmsg);

  class_call(primordial_free(ppm),
             ppm->error_message,
             errmsg);

  class_call(perturb_free(ppt),
             ppt->error_message,
             errmsg);

  class_call(thermodynamics_free(pth),
             pth->error_message,
             errmsg);

  class_call(background_free(pba),
             pba->error_message,
             errmsg);

  return _SUCCESS_;

}

int main() {

  /* shared variable that will be common to all CLASS instances */
  int i;
  int l,l_max;
  int num_ct_max=7;  /* number of C_l types stored per multipole */
  int num_loops=10;  /* number of omega_b values explored */

  struct file_content fc;
  ErrorMsg errmsg_parser;

  /* initialized with defaults so a build without OpenMP never reads
     uninitialized values below */
  int total_number_of_threads = 1;
  int number_of_class_instances = 1;
  int number_of_threads_inside_class = 1;

  /* initialized to 0 so the fprintf loop at the end is well defined even
     if the first CLASS call failed and the indices were never assigned */
  int index_ct_tt = 0;
  int index_ct_ee = 0;
  int index_ct_te = 0;

  /* dealing with the openMP part (number of instances, number of threads
     per instance...) */

#ifdef _OPENMP

  /* Determine the number of threads, class instances and nested threads */
  total_number_of_threads = omp_get_max_threads();

  /* User-fixed number of CLASS instances to be run in parallel
     (Total number of threads should be dividable by this number) */
  number_of_class_instances=2;

  if ((total_number_of_threads % number_of_class_instances) != 0)
    printf("The total number of threads, %d, is not a mutiple of the requested number of CLASS instances, %d\n",total_number_of_threads,number_of_class_instances);

  /* inferred number of threads per instance */
  number_of_threads_inside_class = total_number_of_threads/number_of_class_instances;

  printf("# Total number of available threads = %d, used to run\n", total_number_of_threads);
  printf("# -> %d CLASS executables in parallel\n", number_of_class_instances);
  printf("# -> %d threads inside each CLASS executables\n", number_of_threads_inside_class);

  /* Turn on nested parallelism */
  omp_set_nested(1);

#endif

  /* choose a value of l_max in C_l's */
  l_max=3000;

  /* all parameters for which we don't want to keep default values should
     be passed to the code through a file_content structure. Create such a
     structure with the size you need: 10 in this exemple */
  parser_init(&fc,10,"",errmsg_parser);

  /* assign values to these 10 parameters. Some will be fixed, some will be
     varied in the loop. */
  strcpy(fc.name[0],"output");
  strcpy(fc.value[0],"tCl,pCl,lCl");

  strcpy(fc.name[1],"l_max_scalars");
  sprintf(fc.value[1],"%d",l_max);

  strcpy(fc.name[2],"lensing");
  sprintf(fc.value[2],"yes");

  strcpy(fc.name[3],"H0");
  sprintf(fc.value[3],"%e",72.);

  strcpy(fc.name[4],"omega_b");
  sprintf(fc.value[4],"%e",0.024);

  strcpy(fc.name[5],"omega_cdm");
  sprintf(fc.value[5],"%e",0.05);

  strcpy(fc.name[6],"z_reio");
  sprintf(fc.value[6],"%e",10.);

  strcpy(fc.name[7],"A_s");
  sprintf(fc.value[7],"%e",2.3e-9);

  strcpy(fc.name[8],"n_s");
  sprintf(fc.value[8],"%e",1.);

  strcpy(fc.name[9],"perturbations_verbose");
  sprintf(fc.value[9],"%d",0); // Trick: set to 2 to cross-check actual number of threads per CLASS instance

  /* Create an array of Cl's where all results will be stored for each
     parameter value in the loop */
  double *** cl;
  cl = malloc(num_loops*sizeof(double**));

  /* Create one thread for each instance of CLASS */
#pragma omp parallel num_threads(number_of_class_instances)
  {

    /* set the number of threads inside each CLASS instance */
#ifdef _OPENMP
    omp_set_num_threads(number_of_threads_inside_class);
#endif

    /* for each thread/instance, create all CLASS input/output structures
       (these variables are being declared inside the parallel zone, hence
       they are openMP private variables) */
    struct precision pr;        /* for precision parameters */
    struct background ba;       /* for cosmological background */
    struct thermo th;           /* for thermodynamics */
    struct perturbs pt;         /* for source functions */
    struct transfers tr;        /* for transfer functions */
    struct primordial pm;       /* for primordial spectra */
    struct spectra sp;          /* for output spectra */
    struct nonlinear nl;        /* for non-linear spectra */
    struct lensing le;          /* for lensed spectra */
    struct output op;           /* for output files */
    ErrorMsg errmsg;            /* for error messages */

    struct file_content fc_local;
    int j,iam;
    int ll;  /* private multipole index; the outer 'l' is shared across
                instances and must not be used inside the parallel zone */

    /* copy the shared file content into the local file content used by
       each instance */
    parser_init(&fc_local,fc.size,"",errmsg);
    for (j=0; j < fc.size; j++) {
      strcpy(fc_local.value[j],fc.value[j]);
      strcpy(fc_local.name[j],fc.name[j]);
      fc_local.read[j]=fc.read[j];
    }

    /* loop over (num_loops) values of some parameters: in this exemple,
       omega_b.
       BUG FIX: the bound used to be 'i<=num_loops', which executed
       num_loops+1 iterations and wrote cl[num_loops] one past the end of
       the array allocated above (heap overflow). */
#pragma omp for schedule(static,1)
    for (i=0; i<num_loops; i++) {

#ifdef _OPENMP
      iam=omp_get_thread_num();
#else
      iam=0;
#endif

      /* assign one value to omega_b */
      sprintf(fc_local.value[4],"%e",0.01+i*0.002);
      printf("# %d\tthread=%d : running with omega_b = %s\n",i,iam,fc_local.value[4]);

      /* allocate the array where the Cl's calculated by one instance will
         be written (we could add another array with P(k), or extract
         other results from the code - here we assume that we are
         interested in the C_l's only).
         BUG FIX: use the private index 'll' instead of the shared 'l',
         which was raced between the concurrent instances. */
      cl[i]=malloc((l_max+1)*sizeof(double*));
      for (ll=0;ll<=l_max;ll++)
        cl[i][ll]=malloc(num_ct_max*sizeof(double));

      /* calls class and return the C_l's*/
      if (class(&fc_local,&pr,&ba,&th,&pt,&pm,&nl,&tr,&sp,&le,&op,l_max,cl[i],errmsg) == _FAILURE_) {
        printf("\n\nError in class \n=>%s\n",errmsg);
        //return _FAILURE_;
      }

      /* if this is the first call, extract dynamically the value of
         indices used in the output */
      if ((i==0) && (iam==0)) {
        index_ct_tt=sp.index_ct_tt;
        index_ct_te=sp.index_ct_te;
        index_ct_ee=sp.index_ct_ee;
      }

    } // end of loop over parameters

  } // end parallel zone

  /* write in file the lensed C_l^TT, C_l^EE, C_l^TE's obtained in all
     runs. BUG FIX: check fopen's result (fprintf on NULL is undefined
     behavior, e.g. when the output/ directory does not exist). */
  FILE * out=fopen("output/test_loops_omp.dat","w");
  if (out == NULL) {
    printf("Error: could not open output/test_loops_omp.dat for writing\n");
    return _FAILURE_;
  }

  for (i=0; i<num_loops; i++) {
    for (l=2;l<=l_max;l++) {
      fprintf(out,"%d %e %e %e\n",
              l,
              l*(l+1)*cl[i][l][index_ct_tt],
              l*(l+1)*cl[i][l][index_ct_ee],
              l*(l+1)*cl[i][l][index_ct_te]);
    }
    fprintf(out,"\n");
  }

  fclose(out);

  /* free Cl's array.
     BUG FIX: rows run from 0 to l_max inclusive, so the inner bound must
     be 'l<=l_max' (the old 'l<l_max' leaked cl[i][l_max] every run). */
  for (i=0; i<num_loops; i++) {
    for (l=0;l<=l_max;l++) {
      free(cl[i][l]);
    }
    free(cl[i]);
  }
  free(cl);

  return _SUCCESS_;

}
GB_binop__bclr_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_08__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_04__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_int64) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int64) // C=scalar+B GB (_bind1st__bclr_int64) // C=scalar+B' GB (_bind1st_tran__bclr_int64) // C=A+scalar GB (_bind2nd__bclr_int64) // C=A'+scalar GB (_bind2nd_tran__bclr_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 
0 // BinaryOp: cij = GB_BITCLR (aij, bij, int64_t, 64) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITCLR (x, y, int64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_INT64 || GxB_NO_BCLR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bclr_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict 
Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bclr_int64) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bclr_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITCLR (x, bij, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITCLR (aij, y, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (x, aij, int64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bclr_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (aij, y, int64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bclr_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
CALPHADFreeEnergyFunctionsTernary.h
#ifndef included_CALPHADFreeEnergyFunctionsTernary
#define included_CALPHADFreeEnergyFunctionsTernary

#include "CALPHADSpeciesPhaseGibbsEnergy.h"
#include "InterpolationType.h"
#include "Phases.h"
#include "datatypes.h"
#include "functions.h"

#include <boost/property_tree/ptree.hpp>

#include <fstream>
#include <iostream>
#include <math.h>

namespace Thermo4PFM
{

// CALPHAD free-energy model for a ternary system (species A, B, C) with two
// phases, liquid "L" and solid "A": evaluates phase free energies, their
// concentration derivatives, and equilibrium/tie-line compositions.
// The Redlich-Kister mixing parameters L0..L3 are stored as linear functions
// of temperature (a + b*T).
class CALPHADFreeEnergyFunctionsTernary
{
public:
    // Reads CALPHAD data from input_db and, when present, Newton-solver
    // settings from newton_db.
    CALPHADFreeEnergyFunctionsTernary(boost::property_tree::ptree& input_db,
        boost::optional<boost::property_tree::ptree&> newton_db,
        const EnergyInterpolationType energy_interp_func_type,
        const ConcInterpolationType conc_interp_func_type);

    ~CALPHADFreeEnergyFunctionsTernary(){};

    // Free energy of phase pi at the given temperature and composition
    // (conc presumably points to the two independent concentrations c0, c1
    // of a ternary system -- confirm against the .cc implementation).
    double computeFreeEnergy(const double temperature,
        const double* const conc, const PhaseIndex pi, const bool gp = false);
    // First derivatives with respect to the concentrations, written into
    // deriv.
    void computeDerivFreeEnergy(const double temperature,
        const double* const conc, const PhaseIndex pi, double* deriv);
    // Second derivatives with respect to the concentrations, written into
    // d2fdc2.
    void computeSecondDerivativeFreeEnergy(const double temp,
        const double* const conc, const PhaseIndex pi, double* d2fdc2);

    // Solve for the equilibrium compositions ceq at the given temperature;
    // returns false if no solution was found within maxits iterations.
    bool computeCeqT(const double temperature, double* ceq,
        const int maxits = 20, const bool verbose = false);

    /// Compute compositions and phase fractions at ends of tie line
    /// passing through nominal composition (c0,c1)
    bool computeTieLine(const double temperature, const double c0,
        const double c1, double* ceq, const int maxits = 20,
        const bool verbose = false);

    // Sanity checks over the temperature range [T0, T1] before a run.
    void preRunDiagnostics(const double T0 = 300., const double T1 = 3000.);

    // Per-phase concentrations x corresponding to overall concentrations
    // conc and phase variable phi.
    int computePhaseConcentrations(const double temperature,
        const double* const conc, const double* const phi, double* x);
    void energyVsPhiAndC(const double temperature, const double* const ceq,
        const bool found_ceq, const double phi_well_scale,
        const int npts_phi = 51,
        const int npts_c = 50); // number of compositions to use (>1)
    void printEnergyVsComposition(
        const double temperature, std::ostream& os, const int npts = 100);
    // Chemical part of the free energy, interpolated between the phases.
    double fchem(const double* const phi, const double* const conc,
        const double temperature);
    void printEnergyVsPhiHeader(const double temperature, const int nphi,
        const int nc0, const int nc1, const double c0min, const double c0max,
        const double c1min, const double c1max, std::ostream& os) const;
    void printEnergyVsPhi(const double* const conc, const double temperature,
        const double phi_well_scale, const int npts, std::ostream& os);

private:
    EnergyInterpolationType energy_interp_func_type_;
    ConcInterpolationType conc_interp_func_type_;

    void readNewtonparameters(boost::property_tree::ptree& newton_db);

    // Evaluate all temperature-dependent mixing parameters and the three
    // single-species energies in one call (suffix _L: liquid, _S: solid).
    void computeTdependentParameters(const double temperature,
        CalphadDataType* L_AB_L, CalphadDataType* L_AC_L,
        CalphadDataType* L_BC_L, CalphadDataType* L_ABC_L,
        CalphadDataType* L_AB_S, CalphadDataType* L_AC_S,
        CalphadDataType* L_BC_S, CalphadDataType* L_ABC_S,
        CalphadDataType* fA, CalphadDataType* fB, CalphadDataType* fC);

    // Name of the diagnostics output file (ownership/lifetime handled in
    // the implementation file).
    char* fenergy_diag_filename_;

    // Newton solver settings, filled in by readNewtonparameters().
    double newton_tol_;
    double newton_alpha_;
    int newton_maxits_;
    bool newton_verbose_;

    // Single species energies in each phase
    // size 3 for species A, B, C
    CALPHADSpeciesPhaseGibbsEnergy g_species_phaseL_[3];
    CALPHADSpeciesPhaseGibbsEnergy g_species_phaseA_[3];

    // size 4 for L0, L1, L2, L3, with 2 coefficient for linear expansion in T
    // a+b*T
    CalphadDataType LmixABPhaseL_[4][2];
    CalphadDataType LmixABPhaseA_[4][2];

    CalphadDataType LmixACPhaseL_[4][2];
    CalphadDataType LmixACPhaseA_[4][2];

    CalphadDataType LmixBCPhaseL_[4][2];
    CalphadDataType LmixBCPhaseA_[4][2];

    CalphadDataType LmixABCPhaseL_[3][2];
    CalphadDataType LmixABCPhaseA_[3][2];

    // Interpolation polynomials selectable at run time via the
    // *_interp_func_type_ members.
    double (*fun_ptr_arr_[3])(const double){
        linear_interp_func, pbg_interp_func, harmonic_interp_func };

    void readParameters(boost::property_tree::ptree& calphad_db);

#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
    // energy of species "is" in phase L,A,B
    double getFenergyPhaseL(const short is, const double temperature)
    {
        return g_species_phaseL_[is].fenergy(temperature);
    }
    double getFenergyPhaseA(const short is, const double temperature)
    {
        return g_species_phaseA_[is].fenergy(temperature);
    }

    // --- A-B binary mixing parameters L0..L3, dispatched on phase ---
    CalphadDataType lmix0ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix0ABPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix0ABPhaseA(temperature);
            default: return NAN; // unknown phase
        }
    }

    CalphadDataType lmix1ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix1ABPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix1ABPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix2ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix2ABPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix2ABPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix3ABPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix3ABPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix3ABPhaseA(temperature);
            default: return NAN;
        }
    }

    // A-B parameters in the liquid phase: a + b*T
    CalphadDataType lmix0ABPhaseL(const double temperature) { return LmixABPhaseL_[0][0] + LmixABPhaseL_[0][1] * temperature; }
    CalphadDataType lmix1ABPhaseL(const double temperature) { return LmixABPhaseL_[1][0] + LmixABPhaseL_[1][1] * temperature; }
    CalphadDataType lmix2ABPhaseL(const double temperature) { return LmixABPhaseL_[2][0] + LmixABPhaseL_[2][1] * temperature; }
    CalphadDataType lmix3ABPhaseL(const double temperature) { return LmixABPhaseL_[3][0] + LmixABPhaseL_[3][1] * temperature; }

    // A-B parameters in the solid phase
    CalphadDataType lmix0ABPhaseA(const double temperature) { return LmixABPhaseA_[0][0] + LmixABPhaseA_[0][1] * temperature; }
    CalphadDataType lmix1ABPhaseA(const double temperature) { return LmixABPhaseA_[1][0] + LmixABPhaseA_[1][1] * temperature; }
    CalphadDataType lmix2ABPhaseA(const double temperature) { return LmixABPhaseA_[2][0] + LmixABPhaseA_[2][1] * temperature; }
    CalphadDataType lmix3ABPhaseA(const double temperature) { return LmixABPhaseA_[3][0] + LmixABPhaseA_[3][1] * temperature; }

    // --- A-C binary mixing parameters L0..L3, dispatched on phase ---
    CalphadDataType lmix0ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix0ACPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix0ACPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix1ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix1ACPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix1ACPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix2ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix2ACPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix2ACPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix3ACPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix3ACPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix3ACPhaseA(temperature);
            default: return NAN;
        }
    }

    // A-C parameters in the liquid phase
    CalphadDataType lmix0ACPhaseL(const double temperature) { return LmixACPhaseL_[0][0] + LmixACPhaseL_[0][1] * temperature; }
    CalphadDataType lmix1ACPhaseL(const double temperature) { return LmixACPhaseL_[1][0] + LmixACPhaseL_[1][1] * temperature; }
    CalphadDataType lmix2ACPhaseL(const double temperature) { return LmixACPhaseL_[2][0] + LmixACPhaseL_[2][1] * temperature; }
    CalphadDataType lmix3ACPhaseL(const double temperature) { return LmixACPhaseL_[3][0] + LmixACPhaseL_[3][1] * temperature; }

    // A-C parameters in the solid phase
    CalphadDataType lmix0ACPhaseA(const double temperature) { return LmixACPhaseA_[0][0] + LmixACPhaseA_[0][1] * temperature; }
    CalphadDataType lmix1ACPhaseA(const double temperature) { return LmixACPhaseA_[1][0] + LmixACPhaseA_[1][1] * temperature; }
    CalphadDataType lmix2ACPhaseA(const double temperature) { return LmixACPhaseA_[2][0] + LmixACPhaseA_[2][1] * temperature; }
    CalphadDataType lmix3ACPhaseA(const double temperature) { return LmixACPhaseA_[3][0] + LmixACPhaseA_[3][1] * temperature; }

    // --- B-C binary mixing parameters L0..L3, dispatched on phase ---
    CalphadDataType lmix0BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix0BCPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix0BCPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix1BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix1BCPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix1BCPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix2BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix2BCPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix2BCPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix3BCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix3BCPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix3BCPhaseA(temperature);
            default: return NAN;
        }
    }

    // B-C parameters in the liquid phase
    CalphadDataType lmix0BCPhaseL(const double temperature) { return LmixBCPhaseL_[0][0] + LmixBCPhaseL_[0][1] * temperature; }
    CalphadDataType lmix1BCPhaseL(const double temperature) { return LmixBCPhaseL_[1][0] + LmixBCPhaseL_[1][1] * temperature; }
    CalphadDataType lmix2BCPhaseL(const double temperature) { return LmixBCPhaseL_[2][0] + LmixBCPhaseL_[2][1] * temperature; }
    CalphadDataType lmix3BCPhaseL(const double temperature) { return LmixBCPhaseL_[3][0] + LmixBCPhaseL_[3][1] * temperature; }

    // B-C parameters in the solid phase
    CalphadDataType lmix0BCPhaseA(const double temperature) { return LmixBCPhaseA_[0][0] + LmixBCPhaseA_[0][1] * temperature; }
    CalphadDataType lmix1BCPhaseA(const double temperature) { return LmixBCPhaseA_[1][0] + LmixBCPhaseA_[1][1] * temperature; }
    CalphadDataType lmix2BCPhaseA(const double temperature) { return LmixBCPhaseA_[2][0] + LmixBCPhaseA_[2][1] * temperature; }
    CalphadDataType lmix3BCPhaseA(const double temperature) { return LmixBCPhaseA_[3][0] + LmixBCPhaseA_[3][1] * temperature; }

    // ABC
    // Ternary mixing parameters L0..L2, dispatched on phase.
    CalphadDataType lmix0ABCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix0ABCPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix0ABCPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix1ABCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix1ABCPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix1ABCPhaseA(temperature);
            default: return NAN;
        }
    }

    CalphadDataType lmix2ABCPhase(const PhaseIndex pi, const double temperature)
    {
        switch (pi)
        {
            case PhaseIndex::phaseL: return lmix2ABCPhaseL(temperature);
            case PhaseIndex::phaseA: return lmix2ABCPhaseA(temperature);
            default: return NAN;
        }
    }

    // ABC liquid
    CalphadDataType lmix0ABCPhaseL(const double temperature) { return LmixABCPhaseL_[0][0] + LmixABCPhaseL_[0][1] * temperature; }
    CalphadDataType lmix1ABCPhaseL(const double temperature) { return LmixABCPhaseL_[1][0] + LmixABCPhaseL_[1][1] * temperature; }
    CalphadDataType lmix2ABCPhaseL(const double temperature) { return LmixABCPhaseL_[2][0] + LmixABCPhaseL_[2][1] * temperature; }

    // ABC solid
    CalphadDataType lmix0ABCPhaseA(const double temperature) { return LmixABCPhaseA_[0][0] + LmixABCPhaseA_[0][1] * temperature; }
    CalphadDataType lmix1ABCPhaseA(const double temperature) { return LmixABCPhaseA_[1][0] + LmixABCPhaseA_[1][1] * temperature; }
    CalphadDataType lmix2ABCPhaseA(const double temperature) { return LmixABCPhaseA_[2][0] + LmixABCPhaseA_[2][1] * temperature; }
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif

    // Free energies of both phases (fl: liquid, fa: solid) at one
    // composition, weighted by the interpolated phase fractions hphi.
    void computePhasesFreeEnergies(const double temperature,
        const double* const hphi, const double conc0, const double conc1,
        double& fl, double& fa);
};

// Read the ternary (ABC) Lmix coefficients from Lmix_db into LmixABC.
void readLmixTernaryParameters(
    boost::property_tree::ptree& Lmix_db, CalphadDataType LmixABC[3][2]);
}

#endif
flush.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main() { int data=10, flag = 0; #pragma omp parallel num_threads(4) { if (omp_get_thread_num()%2==0) { data = 5; #pragma omp flush(flag, data) /* Set flag to release thread 1 */ flag = 1; printf("flag=%d data=%d\n", flag, data); #pragma omp flush(flag) } else if (omp_get_thread_num()==1) { #pragma omp flush(flag, data) data = 15; printf("flag=%d data=%d\n", flag, data); #pragma omp flush(flag, data) } else{ #pragma omp flush(flag, data) printf("flag=%d data=%d\n", flag, data); #pragma omp flush(flag) } printf("Thread %d : flag=%d data=%d\n",omp_get_thread_num(),flag, data); } return 0; }
hardswish_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: renzun@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
#include "hardswish_param.h"

/* Reference CPU implementation of the hardswish operator:
 * out = x * clamp(alpha*x + beta, 0, 1), evaluated piecewise below. */

/* No per-node state is needed for this op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Compute hardswish over the whole input tensor, one (n,c) plane per
 * OpenMP iteration.  Returns 0 on success. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct hardswish_param* hardswish_param = ( struct hardswish_param* )ir_node->op.param_mem;
    float alpha = hardswish_param->alpha;
    float beta = hardswish_param->beta;
    /* lower: where alpha*x + beta reaches 0; upper: where it reaches 1.
     * upper = (1/alpha) + lower == (1 - beta) / alpha. */
    float lower = -beta / alpha;
    float upper = (1.f / alpha) + lower;

    /* NCHW layout (enforced by score() below): N*C planes of H*W items. */
    int chan_num = (input_tensor->dims[0]) * (input_tensor->dims[1]);
    int chan_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);

    float* pdata = ( float* )input_tensor->data;
    float* pout_data = ( float* )output_tensor->data;
    int num_thread = exec_graph->num_thread;

#pragma omp parallel for num_threads(num_thread)
    for (int j = 0; j < chan_num; j++)
    {
        float* data = pdata + j * chan_size;
        float* out_data = pout_data + j * chan_size;
        for (int i = 0; i < chan_size; i++)
        {
            if (data[i] < lower)
                out_data[i] = 0.f;              /* clamp term is 0 */
            else if (data[i] > upper)
                out_data[i] = data[i];          /* clamp term is 1 */
            else
                out_data[i] = data[i] * (data[i] * alpha + beta);
        }
    }

    return 0;
}

/* This reference kernel only handles FP32 NCHW tensors; report
 * OPS_SCORE_CANDO for those and 0 (cannot run) otherwise. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct ir_node* ir_node = exec_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW)
        return 0;

    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_hardswish_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_HARDSWISH, &hcl_node_ops);
}

static int unreg_hardswish_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_HARDSWISH, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_hardswish_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_hardswish_hcl_ops);
irbuilder_unroll_heuristic.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_heuristic( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_HEADER]]: 
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_COND]]: // CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[DOTCOUNT]] // CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP4]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP7]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]] // CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP10]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]] // CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP13]] to i64 // CHECK-NEXT: 
%[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_INC]]: // CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1 // CHECK-NEXT: br label %[[OMP_LOOP_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_EXIT]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_heuristic(float *a, float *b, float *c, float *d) { #pragma omp unroll for (int i = 0; i < 128; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: store i32 128, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]] // 
CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] 
= add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
GB_binop__ldexp_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ldexp_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_01__ldexp_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__ldexp_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__ldexp_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ldexp_fp32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__ldexp_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__ldexp_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ldexp_fp32)
// C=scalar+B                       GB (_bind1st__ldexp_fp32)
// C=scalar+B'                      GB (_bind1st_tran__ldexp_fp32)
// C=A+scalar                       GB (_bind2nd__ldexp_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__ldexp_fp32)

// C type:   float
// A type:   float
// B,b type: float

// BinaryOp: cij = ldexpf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = ldexpf (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LDEXP || GxB_NO_FP32 || GxB_NO_LDEXP_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ldexp_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__ldexp_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, 
const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ldexp_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ldexp_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = ldexpf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ldexp_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = ldexpf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ldexpf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__ldexp_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ldexpf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__ldexp_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp6.c
#include<stdio.h> int N; struct State { int col[99], d0[99], d1[99]; }; int set(int r, int c, struct State *s) { if (s->col[c] || s->d0[r+c] || s->d1[N+r-c]) return 0; s->col[c]++; s->d0[r+c]++; s->d1[N+r-c]++; return 1; } void unset(int r, int c, struct State *s) { s->col[c]--; s->d0[r+c]--; s->d1[N+r-c]--; } int q(int r, struct State *s) { if (r >= N) return 1; int c, sum = 0; for (c = 0; c < N; c++) { if (set(r, c, s)) { sum += q(r+1, s); unset(r, c, s); } } return sum; } int main() { int i, j, count[N]; struct State s; scanf("%d", &N); #pragma omp parallel for private(s, j) for (i = 0; i < 14; i++) { if (i >= N/2) continue; for (j = 0; j < N*2; ++j) s.col[j] = s.d0[j] = s.d1[j] = 0; set(0, i, &s); count[i] = q(1, &s); unset(0, i, &s); } for (i = 0; i < N/2; ++i) printf("%d ", count[i]); return 0; }
GB_binop__pair_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__pair_uint16
// A.*B function (eWiseMult):       GB_AemultB__pair_uint16
// A*D function (colscale):         GB_AxD__pair_uint16
// D*A function (rowscale):         GB_DxB__pair_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__pair_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__pair_uint16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__pair_uint16
// C=scalar+B                       (none)
// C=scalar+B'                      (none)
// C=A+scalar                       (none)
// C=A'+scalar                      (none)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t

// BinaryOp: cij = 1
// (PAIR ignores both operands, so the GETA/GETB macros below are empty.)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = 1 ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_UINT16 || GxB_NO_PAIR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__pair_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__pair_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__pair_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__pair_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__pair_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__pair_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__pair_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // PAIR ignores its inputs; the empty GETA/GETB expand to ";"
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary
operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
ams.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
#include "_hypre_utilities.hpp"

/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax
 *
 * Relaxation on the ParCSR matrix A with right-hand side f and
 * initial guess u. Possible values for relax_type are:
 *
 * 1 = l1-scaled (or weighted) Jacobi
 * 2 = l1-scaled block Gauss-Seidel/SSOR
 * 3 = Kaczmarz
 * 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
 * x = BoomerAMG relaxation with relax_type = |x|
 * (16 = Cheby)
 *
 * The default value of relax_type is 2.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRRelax(hypre_ParCSRMatrix *A,          /* matrix to relax with */
                            hypre_ParVector    *f,          /* right-hand side */
                            HYPRE_Int           relax_type, /* relaxation type */
                            HYPRE_Int           relax_times,/* number of sweeps */
                            HYPRE_Real         *l1_norms,   /* l1 norms of the rows of A */
                            HYPRE_Real          relax_weight, /* damping coefficient (usually <= 1) */
                            HYPRE_Real          omega,      /* SOR parameter (usually in (0,2) */
                            HYPRE_Real          max_eig_est,/* for cheby smoothers */
                            HYPRE_Real          min_eig_est,
                            HYPRE_Int           cheby_order,
                            HYPRE_Real          cheby_fraction,
                            hypre_ParVector    *u,          /* initial/updated approximation */
                            hypre_ParVector    *v,          /* temporary vector */
                            hypre_ParVector    *z           /* temporary vector */)
{
   HYPRE_Int sweep;

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         /* dispatched to BoomerAMG relax type 7 with the supplied l1 row norms */
         hypre_BoomerAMGRelax(A, f, NULL, 7, 0, relax_weight, 1.0, l1_norms, u, v, z);
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         /* !!! Note: relax_weight and omega flipped !!! */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

         if (exec == HYPRE_EXEC_DEVICE)
         {
            hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, NULL, 0, omega, relax_weight,
                                                        l1_norms, u, v, z, 1, 1 /* symm */);
         }
         else
#endif
         {
            hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, NULL, 0, omega, relax_weight,
                                                       l1_norms, u, v, z, 1, 1 /* symm */,
                                                       0 /* skip diag */, 1, 0);
         }
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_BoomerAMGRelax(A, f, NULL, 20, 0, relax_weight, omega, l1_norms, u, v, z);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order,
                                    1, 0, u, v, z);
         }
         else
         {
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight, omega,
                                 l1_norms, u, v, z);
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInRangeOf
 *
 * Return a vector that belongs to the range of a given matrix.
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   /* range vector: sized/partitioned by the rows of A */
   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(x);

   /* vector owns its data, but the partitioning belongs to A */
   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInDomainOf
 *
 * Return a vector that belongs to the domain of a given matrix.
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   /* domain vector: sized/partitioned by the columns of A */
   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumCols(A),
                             hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(x);

   /* vector owns its data, but the partitioning belongs to A */
   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* dir == 0: scatter interleaved x into the component vectors x0/x1/x2;
 * dir == 1: gather x0/x1/x2 back into interleaved x.
 * Launched with one thread per (node, component) pair, i.e. size * dim. */
template<HYPRE_Int dir>
__global__ void
hypreCUDAKernel_ParVectorBlockSplitGather(HYPRE_Int size, HYPRE_Int dim,
                                          HYPRE_Real *x0, HYPRE_Real *x1, HYPRE_Real *x2,
                                          HYPRE_Real *x)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1,1>();

   if (i >= size * dim)
   {
      return;
   }

   HYPRE_Real *xx[3];
   xx[0] = x0;
   xx[1] = x1;
   xx[2] = x2;

   const HYPRE_Int d = i % dim; /* component index */
   const HYPRE_Int k = i / dim; /* node index */

   if (dir == 0)
   {
      xx[d][k] = x[i];
   }
   else if (dir == 1)
   {
      x[i] = xx[d][k];
   }
}
#endif

HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif

   /* local size of one component vector */
   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultCUDABlockDimension();
      dim3 gDim = hypre_GetDefaultCUDAGridDimension(size_ * dim, "thread", bDim);
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<0>, gDim, bDim,
                         size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
   }
   else
#endif
   {
      for (i = 0; i < size_; i++)
         for (d = 0; d < dim; d++)
            x_data_[d][i] = x_data[dim*i+d];
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParVectorMemoryLocation(x) );
#endif

   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultCUDABlockDimension();
      dim3 gDim = hypre_GetDefaultCUDAGridDimension(size_ * dim, "thread", bDim);
      /* dir == 1: gather the component vectors into the interleaved x */
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParVectorBlockSplitGather<1>, gDim, bDim,
                         size_, dim, x_data_[0], x_data_[1], x_data_[2], x_data);
   }
   else
#endif
   {
      for (i = 0; i < size_; i++)
         for (d = 0; d < dim; d++)
            x_data[dim*i+d] = x_data_[d][i];
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int d, dim = 1;

   hypre_ParVector *b_[3];
   hypre_ParVector *x_[3];

   /* number of components interleaved in x (assumes A is square) */
   dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   if (dim == 1)
   {
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   for (d = 0; d < dim; d++)
   {
      b_[d] = hypre_ParVectorInRangeOf(A);
      x_[d] = hypre_ParVectorInRangeOf(A);
   }

   hypre_ParVectorBlockSplit(b, b_, dim);
   hypre_ParVectorBlockSplit(x, x_, dim);

   /* solve each component system with the same scalar AMG solver */
   for (d = 0; d < dim; d++)
      hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);

   hypre_ParVectorBlockGather(x, x_, dim);

   for (d = 0; d < dim; d++)
   {
      hypre_ParVectorDestroy(b_[d]);
      hypre_ParVectorDestroy(x_[d]);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFixZeroRows
 *
 * For every zero row in the matrix: set the diagonal element to 1.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixFixZeroRowsHost(hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j;
   HYPRE_Real l1_norm;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (i = 0; i < num_rows; i++)
   {
      l1_norm = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         l1_norm += fabs(A_diag_data[j]);
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            l1_norm += fabs(A_offd_data[j]);

      if (l1_norm <= eps)
      {
         /* zero row: put 1 on the diagonal, 0 everywhere else */
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            if (A_diag_J[j] == i)
               A_diag_data[j] = 1.0;
            else
               A_diag_data[j] = 0.0;
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               A_offd_data[j] = 0.0;
      }
   }

   return hypre_error_flag;
}

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One warp per row: warp-reduce the row's l1 norm, then rewrite the row
 * cooperatively if it is (numerically) zero. */
__global__ void
hypreCUDAKernel_ParCSRMatrixFixZeroRows( HYPRE_Int      nrows,
                                         HYPRE_Int     *A_diag_i,
                                         HYPRE_Int     *A_diag_j,
                                         HYPRE_Complex *A_diag_data,
                                         HYPRE_Int     *A_offd_i,
                                         HYPRE_Complex *A_offd_data,
                                         HYPRE_Int      num_cols_offd)
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1,1>();

   if (row_i >= nrows)
   {
      return;
   }

   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */
   HYPRE_Real l1_norm = 0.0;
   HYPRE_Int p1, q1, p2 = 0, q2 = 0;

   /* lanes 0 and 1 load the row begin/end offsets, then broadcast */
   if (lane < 2)
   {
      p1 = read_only_load(A_diag_i + row_i + lane);
      if (num_cols_offd)
      {
         p2 = read_only_load(A_offd_i + row_i + lane);
      }
   }

   q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1);
   p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0);
   if (num_cols_offd)
   {
      q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1);
      p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0);
   }

   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      l1_norm += fabs(A_diag_data[j]);
   }

   for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
   {
      l1_norm += fabs(A_offd_data[j]);
   }

   l1_norm = warp_allreduce_sum(l1_norm);

   if (l1_norm <= eps)
   {
      for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
      {
         if (row_i == read_only_load(&A_diag_j[j]))
         {
            A_diag_data[j] = 1.0;
         }
         else
         {
            A_diag_data[j] = 0.0;
         }
      }

      for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
      {
         A_offd_data[j] = 0.0;
      }
   }
}

HYPRE_Int hypre_ParCSRMatrixFixZeroRowsDevice(hypre_ParCSRMatrix *A)
{
   HYPRE_Int        nrows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int        num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   dim3             bDim, gDim;

   bDim = hypre_GetDefaultCUDABlockDimension();
   gDim = hypre_GetDefaultCUDAGridDimension(nrows, "warp", bDim);

   HYPRE_CUDA_LAUNCH(hypreCUDAKernel_ParCSRMatrixFixZeroRows, gDim, bDim,
                     nrows, A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_data,
                     num_cols_offd);

   //hypre_SyncCudaComputeStream(hypre_handle());

   return hypre_error_flag;
}
#endif

HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_ParCSRMatrixFixZeroRowsDevice(A);
   }
   else
#endif
   {
      return hypre_ParCSRMatrixFixZeroRowsHost(A);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Truncation functor for option 4 (Remark 6.2): keep the diagonal value y
 * when the accumulated norm x is within a factor of 4/3 of it. */
struct l1_norm_op1 : public thrust::binary_function<HYPRE_Complex, HYPRE_Complex, HYPRE_Complex>
{
   __host__ __device__
   HYPRE_Complex operator()(HYPRE_Complex &x, HYPRE_Complex &y) const
   {
      return x <= 4.0/3.0 * y ? y : x;
   }
};
#endif

HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix  *A,
                                     HYPRE_Int            option,
                                     HYPRE_Int           *cf_marker,
                                     HYPRE_Real         **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_MemoryLocation memory_location_l1 = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( memory_location_l1 );

   /* multi-threaded host runs are handled by a separate implementation */
   if (exec == HYPRE_EXEC_HOST)
   {
      HYPRE_Int num_threads = hypre_NumThreads();
      if (num_threads > 1)
      {
         return hypre_ParCSRComputeL1NormsThreads(A, option, num_threads, cf_marker, l1_norm_ptr);
      }
   }

   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_l1);
   HYPRE_MemoryLocation memory_location_tmp =
      exec == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
   HYPRE_Real *diag_tmp = NULL;

   HYPRE_Int *cf_marker_offd = NULL, *cf_marker_dev = NULL;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
      {
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, memory_location_tmp);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
      {
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
      }
      /* pack the local cf_marker entries that neighbors need */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate_v2(11, comm_pkg, HYPRE_MEMORY_HOST, int_buf_data,
                                                    memory_location_tmp, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

      if (exec == HYPRE_EXEC_DEVICE)
      {
         /* device run: mirror cf_marker on the device */
         cf_marker_dev = hypre_TAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_DEVICE);
         hypre_TMemcpy(cf_marker_dev, cf_marker, HYPRE_Int, num_rows,
                       HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      }
      else
      {
         cf_marker_dev = cf_marker;
      }
   }

   if (option == 1)
   {
      /* Set the l1 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, cf_marker_dev, cf_marker_dev, l1_norm, 1, 1.0, "set");
      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 2)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      /* Add the l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 1.0, "add");
      }
   }
   else if (option == 3)
   {
      /* Set the CF l2 norm of the diag part */
      hypre_CSRMatrixComputeRowSum(A_diag, NULL, NULL, l1_norm, 2, 1.0, "set");
      /* Add the CF l2 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, NULL, NULL, l1_norm, 2, 1.0, "add");
      }
   }
   else if (option == 4)
   {
      /* Set the abs(diag) element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 1);
      /* keep a copy of |diag| for the truncation test below */
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
      hypre_TMemcpy(diag_tmp, l1_norm, HYPRE_Real, num_rows, memory_location_tmp,
                    memory_location_l1);
      /* Add the scaled l1 norm of the offd part */
      if (num_cols_offd)
      {
         hypre_CSRMatrixComputeRowSum(A_offd, cf_marker_dev, cf_marker_offd, l1_norm, 1, 0.5, "add");
      }
      /* Truncate according to Remark 6.2 */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         HYPRE_THRUST_CALL( transform, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm,
                            l1_norm_op1() );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] <= 4.0/3.0 * diag_tmp[i])
            {
               l1_norm[i] = diag_tmp[i];
            }
         }
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      /* Set the diag element */
      hypre_CSRMatrixExtractDiagonal(A_diag, l1_norm, 0);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if ( exec == HYPRE_EXEC_DEVICE)
      {
         /* replace zero diagonal entries with 1.0 */
         thrust::identity<HYPRE_Complex> identity;
         HYPRE_THRUST_CALL( replace_if, l1_norm, l1_norm + num_rows, thrust::not1(identity), 1.0 );
      }
      else
#endif
      {
         for (i = 0; i < num_rows; i++)
         {
            if (l1_norm[i] == 0.0)
            {
               l1_norm[i] = 1.0;
            }
         }
      }
      *l1_norm_ptr = l1_norm;

      /* NOTE(review): this early return skips the cf_marker_offd /
         cf_marker_dev frees done below — possible leak when cf_marker is
         non-NULL with option 5; confirm whether that combination occurs */
      return hypre_error_flag;
   }

   /* Handle negative definite matrices */
   if (!diag_tmp)
   {
      diag_tmp = hypre_TAlloc(HYPRE_Real, num_rows, memory_location_tmp);
   }

   /* Set the diag element */
   hypre_CSRMatrixExtractDiagonal(A_diag, diag_tmp, 0);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* flip the sign of l1_norm where the diagonal is negative */
      HYPRE_THRUST_CALL( transform_if, l1_norm, l1_norm + num_rows, diag_tmp, l1_norm,
                         thrust::negate<HYPRE_Real>(), is_negative<HYPRE_Real>() );
      //bool any_zero = HYPRE_THRUST_CALL( any_of, l1_norm, l1_norm + num_rows, thrust::not1(thrust::identity<HYPRE_Complex>()) );
      bool any_zero = 0.0 == HYPRE_THRUST_CALL( reduce, l1_norm, l1_norm + num_rows, 1.0,
                                                thrust::minimum<HYPRE_Real>() );
      if ( any_zero )
      {
         hypre_error_in_arg(1);
      }
   }
   else
#endif
   {
      for (i = 0; i < num_rows; i++)
      {
         if (diag_tmp[i] < 0.0)
         {
            l1_norm[i] = -l1_norm[i];
         }
      }

      /* a zero l1 norm is an error: the smoother would divide by it */
      for (i = 0; i < num_rows; i++)
      {
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
      }
   }

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_TFree(cf_marker_dev, HYPRE_MEMORY_DEVICE);
   }
   hypre_TFree(cf_marker_offd, memory_location_tmp);
   hypre_TFree(diag_tmp, memory_location_tmp);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDiagRows
 *
 * For every row containing only a diagonal element: set it to d.
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One thread per row: if the row has exactly one diag entry (the diagonal
 * itself) and no offd entries, overwrite that entry with d. */
__global__ void
hypreCUDAKernel_ParCSRMatrixSetDiagRows(HYPRE_Int nrows, HYPRE_Int *A_diag_I, HYPRE_Int *A_diag_J,
                                        HYPRE_Complex *A_diag_data, HYPRE_Int *A_offd_I,
                                        HYPRE_Int num_cols_offd, HYPRE_Real d)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   HYPRE_Int j = read_only_load(&A_diag_I[i]);

   if ( (read_only_load(&A_diag_I[i+1]) == j+1) && (read_only_load(&A_diag_J[j]) == i) &&
        (!num_cols_offd || (read_only_load(&A_offd_I[i+1]) == read_only_load(&A_offd_I[i]))) )
   {
      A_diag_data[j] = d;
   }
}
#endif

HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      dim3 bDim = hypre_GetDefaultCUDABlockDimension();
      dim3 gDim = hypre_GetDefaultCUDAGridDimension(num_rows, "thread", bDim);
      HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ParCSRMatrixSetDiagRows, gDim, bDim,
                         num_rows, A_diag_I, A_diag_J, A_diag_data, A_offd_I, num_cols_offd, d);
   }
   else
#endif
   {
      for (i = 0; i < num_rows; i++)
      {
         j = A_diag_I[i];
         if ((A_diag_I[i+1] == j+1) && (A_diag_J[j] == i) &&
             (!num_cols_offd || (A_offd_I[i+1] == A_offd_I[i])))
         {
            A_diag_data[j] = d;
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSCreate
 *
 * Allocate the AMS solver structure.
 *--------------------------------------------------------------------------*/

void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */

   ams_data -> dim = 3;                /* 3D problem */
   ams_data -> maxit = 20;             /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;             /* convergence tolerance */
   ams_data -> print_level = 1;        /* print residual norm at each step */
   ams_data -> cycle_type = 1;         /* a 3-level multiplicative solver */

   ams_data -> A_relax_type = 2;       /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;      /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0;   /* damping parameter */
   ams_data -> A_omega = 1.0;          /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;      /* Cheby: order (1 -4 are valid) */
   ams_data -> A_cheby_fraction = .3;  /* Cheby: fraction of spectrum to smooth */

   ams_data -> B_G_coarsen_type = 10;  /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;     /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;     /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;       /* strength threshold */
   ams_data -> B_G_interp_type = 0;    /* interpolation type */
   ams_data -> B_G_Pmax = 0;           /* max nonzero elements in interp. rows */

   ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;      /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;   /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;          /* max nonzero elements in interp. rows */

   ams_data -> beta_is_zero = 0;       /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   ams_data -> x = NULL;
   ams_data -> y = NULL;
   ams_data -> z = NULL;
   ams_data -> Gx = NULL;
   ams_data -> Gy = NULL;
   ams_data -> Gz = NULL;

   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;
   ams_data -> zz = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;
   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;
   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* ownership flags: Pi is built internally by default; A_G/A_Pi are
      user-supplied unless constructed in hypre_AMSSetup */
   ams_data -> owns_Pi   = 1;
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}

/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure. Note that the input data (given
 * through the Set functions) is not destroyed.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* coarse-space matrices/solvers: destroy only what this object owns */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   /* temporary work vectors */
   if (ams_data -> r0) hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0) hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1) hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1) hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2) hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2) hypre_ParVectorDestroy(ams_data -> g2);
   if (ams_data -> zz) hypre_ParVectorDestroy(ams_data -> zz);

   /* NOTE(review): when G0 is set, A appears to be owned by the solver
      (destroyed here) — presumably it was rebuilt internally during setup;
      confirm against hypre_AMSSetup before changing this */
   if (ams_data -> G0) hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0) hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0) hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   hypre_SeqVectorDestroy(ams_data -> A_l1_norms);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   if (ams_data) /* always true here (checked above); kept as in original */
   {
      hypre_TFree(ams_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDimension(void *solver, HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (dim != 1 && dim != 2 && dim != 3)
      hypre_error_in_arg(2);
   ams_data -> dim = dim;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Set the discrete gradient matrix G.
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> G = G;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Set the x, y and z coordinates of the vertices in the mesh.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver, hypre_ParVector *x, hypre_ParVector *y, hypre_ParVector *z) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> x = x; ams_data -> y = y; ams_data -> z = z; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetEdgeConstantVectors * * Set the vectors Gx, Gy and Gz which give the representations of * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the * edge element basis. * * Either SetCoordinateVectors or SetEdgeConstantVectors should be * called before hypre_AMSSetup()! *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> Gx = Gx; ams_data -> Gy = Gy; ams_data -> Gz = Gz; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetInterpolations * * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz]. * * This function is generally intended to be used only for high-order Nedelec * discretizations (in the lowest order case, Pi is constructed internally in * AMS from the discreet gradient matrix and the coordinates of the vertices), * though it can also be used in the lowest-order case or for other types of * discretizations (e.g. ones based on the second family of Nedelec elements). * * By definition, Pi is the matrix representation of the linear operator that * interpolates (high-order) vector nodal finite elements into the (high-order) * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0) * and similarly for Piy and Piz. Note that all these operators depend on the * choice of the basis and degrees of freedom in the high-order spaces. 
* * The column numbering of Pi should be node-based, i.e. the x/y/z components of * the first node (vertex or high-order dof) should be listed first, followed by * the x/y/z components of the second node and so on (see the documentation of * HYPRE_BoomerAMGSetDofFunc). * * If used, this function should be called before hypre_AMSSetup() and there is * no need to provide the vertex coordinates. Furthermore, only one of the sets * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide * both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on * monolithic Pi (cycle_type < 10) require that Pi is not NULL. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInterpolations(void *solver, hypre_ParCSRMatrix *Pi, hypre_ParCSRMatrix *Pix, hypre_ParCSRMatrix *Piy, hypre_ParCSRMatrix *Piz) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> Pi = Pi; ams_data -> Pix = Pix; ams_data -> Piy = Piy; ams_data -> Piz = Piz; ams_data -> owns_Pi = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaPoissonMatrix * * Set the matrix corresponding to the Poisson problem with coefficient * alpha (the curl-curl term coefficient in the Maxwell problem). * * If this function is called, the coarse space solver on the range * of Pi^T is a block-diagonal version of A_Pi. If this function is not * called, the coarse space solver on the range of Pi^T is constructed * as Pi^T A Pi in hypre_AMSSetup(). 
*--------------------------------------------------------------------------*/

/* Record the user-provided alpha-Poisson matrix and penalize its
   eliminated degrees of freedom. */
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * beta (the mass term coefficient in the Maxwell problem).
 *
 * This function call is optional - if not given, the Poisson matrix will
 * be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
 * that beta is 0 and use two-level (instead of three-level) methods.
 *--------------------------------------------------------------------------*/

/* Record the beta-Poisson matrix; a NULL argument marks beta as zero,
   switching AMS to its two-level variant. */
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> A_G = A_G;

   if (!A_G)
   {
      data -> beta_is_zero = 1;
   }
   else
   {
      /* Penalize the eliminated degrees of freedom */
      hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);

      /* Make sure that the first entry in each row is the diagonal one. */
      /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInteriorNodes
 *
 * Set the list of nodes which are interior to the zero-conductivity region.
 * A node is interior if interior_nodes[i] == 1.0.
 *
 * Should be called before hypre_AMSSetup()!
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInteriorNodes(void *solver, hypre_ParVector *interior_nodes) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> interior_nodes = interior_nodes; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetProjectionFrequency * * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T), * when iterating with the solver. * * The default value is every 5th iteration. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver, HYPRE_Int projection_frequency) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> projection_frequency = projection_frequency; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetMaxIter * * Set the maximum number of iterations in the three-level method. * The default value is 20. To use the AMS solver as a preconditioner, * set maxit to 1, tol to 0.0 and print_level to 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetMaxIter(void *solver, HYPRE_Int maxit) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> maxit = maxit; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetTol * * Set the convergence tolerance (if the method is used as a solver). * The default value is 1e-6. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetTol(void *solver, HYPRE_Real tol) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> tol = tol; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetCycleType * * Choose which three-level solver to use. 
Possible values are: * * 1 = 3-level multipl. solver (01210) <-- small solution time * 2 = 3-level additive solver (0+1+2) * 3 = 3-level multipl. solver (02120) * 4 = 3-level additive solver (010+2) * 5 = 3-level multipl. solver (0102010) <-- small solution time * 6 = 3-level additive solver (1+020) * 7 = 3-level multipl. solver (0201020) <-- small number of iterations * 8 = 3-level additive solver (0(1+2)0) <-- small solution time * 9 = 3-level multipl. solver (01210) with discrete divergence * 11 = 5-level multipl. solver (013454310) <-- small solution time, memory * 12 = 5-level additive solver (0+1+3+4+5) * 13 = 5-level multipl. solver (034515430) <-- small solution time, memory * 14 = 5-level additive solver (01(3+4+5)10) * 20 = 2-level multipl. solver (0[12]0) * * 0 = a Hiptmair-like smoother (010) * * The default value is 1. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCycleType(void *solver, HYPRE_Int cycle_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> cycle_type = cycle_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetPrintLevel * * Control how much information is printed during the solution iterations. * The defaut values is 1 (print residual norm at each step). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetPrintLevel(void *solver, HYPRE_Int print_level) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> print_level = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetSmoothingOptions * * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver, HYPRE_Int A_relax_type, HYPRE_Int A_relax_times, HYPRE_Real A_relax_weight, HYPRE_Real A_omega) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_relax_type = A_relax_type; ams_data -> A_relax_times = A_relax_times; ams_data -> A_relax_weight = A_relax_weight; ams_data -> A_omega = A_omega; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetChebySmoothingOptions * AB: note: this could be added to the above, * but I didn't want to change parameter list) * Set parameters for chebyshev smoother for A. Default values: 2,.3. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver, HYPRE_Int A_cheby_order, HYPRE_Int A_cheby_fraction) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_cheby_order = A_cheby_order; ams_data -> A_cheby_fraction = A_cheby_fraction; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGOptions * * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0. 
*--------------------------------------------------------------------------*/

/* Record the BoomerAMG parameters used for the alpha (B_Pi) subsolver. */
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> B_Pi_coarsen_type = B_Pi_coarsen_type;
   data -> B_Pi_agg_levels   = B_Pi_agg_levels;
   data -> B_Pi_relax_type   = B_Pi_relax_type;
   data -> B_Pi_theta        = B_Pi_theta;
   data -> B_Pi_interp_type  = B_Pi_interp_type;
   data -> B_Pi_Pmax         = B_Pi_Pmax;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
 *--------------------------------------------------------------------------*/

/* Record the coarsest-level relaxation type for the B_Pi AMG solver. */
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGOptions
 *
 * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
*--------------------------------------------------------------------------*/

/* Record the BoomerAMG parameters used for the beta (B_G) subsolver. */
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> B_G_coarsen_type = B_G_coarsen_type;
   data -> B_G_agg_levels   = B_G_agg_levels;
   data -> B_G_relax_type   = B_G_relax_type;
   data -> B_G_theta        = B_G_theta;
   data -> B_G_interp_type  = B_G_interp_type;
   data -> B_G_Pmax         = B_G_Pmax;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_G. Default value: 8.
 *--------------------------------------------------------------------------*/

/* Record the coarsest-level relaxation type for the B_G AMG solver. */
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data -> B_G_coarse_relax_type = B_G_coarse_relax_type;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePi
 *
 * Construct the Pi interpolation matrix, which maps the space of vector
 * linear finite elements to the space of edge finite elements.
 *
 * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
 * where each block has the same sparsity structure as G, and the entries
 * can be computed from the vectors Gx, Gy, Gz.
*--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/* Device kernel: one thread per nonzero of G; expands each column index of
   G into dim consecutive column indices of Pi. */
__global__ void
hypreCUDAKernel_AMSComputePi_copy1(HYPRE_Int nnz, HYPRE_Int dim, HYPRE_Int *j_in,
                                   HYPRE_Int *j_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_thread_id<1,1>();

   if (i < nnz)
   {
      const HYPRE_Int j = dim * i;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         j_out[j+d] = dim * read_only_load(&j_in[i]) + d;
      }
   }
}

/* Device kernel: one warp per row of G; writes the Pi values
   0.5*|G_ij|*G{x,y,z}[i] for each nonzero (or just G{x,y,z}[i] when
   data_in is NULL). */
__global__ void
hypreCUDAKernel_AMSComputePi_copy2(HYPRE_Int nrows, HYPRE_Int dim, HYPRE_Int *i_in,
                                   HYPRE_Real *data_in, HYPRE_Real *Gx_data,
                                   HYPRE_Real *Gy_data, HYPRE_Real *Gz_data,
                                   HYPRE_Real *data_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   /* lanes 0 and 1 read the row extent [istart, iend), then broadcast it */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d loads G{x,y,z}[i]; all dim values are broadcast to the warp */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;
      const HYPRE_Int k = j * dim;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         data_out[k+d] = v * G[d];
      }
   }
}
#endif

/* Build Pi = [Pi_x, Pi_y, Pi_z]: same row structure as G, with dim columns
   per column of G, and values 0.5*|G_ij|*G{x,y,z}[i]. The result is
   returned through Pi_ptr and owned by the caller. */
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* Pi has dim times as many columns and nonzeros as G */
      col_starts_size = 2;
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      /* Pi owns its data and its (freshly allocated) col_starts, but shares
         row_starts with G */
      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;

      hypre_ParCSRMatrixInitialize(Pi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(G),
                                                         hypre_ParCSRMatrixMemoryLocation(Pi) );
#endif

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointers of Pi are dim times those of G */
            HYPRE_THRUST_CALL( transform, G_diag_I, G_diag_I + G_diag_nrows + 1, Pi_diag_I, dim * _1 );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_diag_nnz, dim, G_diag_J, Pi_diag_J );

            /* NOTE(review): Gy_data/Gz_data are passed unconditionally here
               and are uninitialized when dim < 3; the kernel only reads
               lanes < dim, so this looks benign -- confirm. */
            gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data,
                               Pi_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
               Pi_diag_I[i] = dim * G_diag_I[i];

            for (i = 0; i < G_diag_nnz; i++)
               for (d = 0; d < dim; d++)
                  Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

            /* values are filled dim-interleaved with a bumped pointer */
            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  if (dim == 3)
                     *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
               }
         }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* the I copy is skipped for an empty off-diagonal block */
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( transform, G_offd_I, G_offd_I + G_offd_nrows + 1, Pi_offd_I, dim * _1 );
            }

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nnz, "thread", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_offd_nnz, dim, G_offd_J, Pi_offd_J );

            gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy2, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data,
                               Pi_offd_data );
         }
         else
#endif
         {
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows+1; i++)
                  Pi_offd_I[i] = dim * G_offd_I[i];

            for (i = 0; i < G_offd_nnz; i++)
               for (d = 0; d < dim; d++)
                  Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
               {
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 2)
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  if (dim == 3)
                     *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
               }
         }

         /* expand the off-diagonal column map the same dim-fold way */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim * G_cmap[i] + (HYPRE_BigInt)d;
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 *
edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/* Device kernel: one warp per row of G; writes 0.5*|G_ij|*G{x,y,z}[i] into
   the separate output arrays data_{x,y,z}_out (only the first dim outputs
   are used; unused ones may be NULL). */
__global__ void
hypreCUDAKernel_AMSComputePixyz_copy(HYPRE_Int nrows, HYPRE_Int dim, HYPRE_Int *i_in,
                                     HYPRE_Real *data_in, HYPRE_Real *Gx_data,
                                     HYPRE_Real *Gy_data, HYPRE_Real *Gz_data,
                                     HYPRE_Real *data_x_out, HYPRE_Real *data_y_out,
                                     HYPRE_Real *data_z_out )
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3], *Odata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;
   Odata[0] = data_x_out;
   Odata[1] = data_y_out;
   Odata[2] = data_z_out;

   /* lanes 0 and 1 read the row extent [istart, iend), then broadcast it */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d loads G{x,y,z}[i]; all dim values are broadcast to the warp */
   if (lane_id < dim)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }
   for (HYPRE_Int d = 0; d < dim; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real v = data_in ? fabs(read_only_load(&data_in[j])) * 0.5 : 1.0;

      for (HYPRE_Int d = 0; d < dim; d++)
      {
         Odata[d][j] = v * G[d];
      }
   }
}
#endif

/* Build the components Pix (and, for dim >= 2/== 3, Piy/Piz) of Pi. Each
   component copies G's sparsity pattern; its values are
   0.5*|G_ij|*G{x,y,z}[i]. Outputs are returned through Pix_ptr/Piy_ptr/
   Piz_ptr (only the first dim of them are written). */
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(G) );
#endif

   /* Compute Pix, Piy, Piz */
   {
      HYPRE_Int i, j;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      /* each component has exactly the sizes of G and shares its
         row/col partitionings */
      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
      hypre_ParCSRMatrixInitialize(Pix);

      if (dim >= 2)
      {
         Piy = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piy) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
         hypre_ParCSRMatrixInitialize(Piy);
      }

      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 2)
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* replicate G's row pointers and column indices into all three
               components in one fused pass each */
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I, Piz_diag_I)) );

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J, Piz_diag_J)) );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, Gz_data,
                               Pix_diag_data, Piy_diag_data, Piz_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
               Piz_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
               Piz_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
               }
         }
      }
      else if (dim == 2)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_I, G_diag_I)),
                               G_diag_nrows + 1,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_I, Piy_diag_I)) );

            HYPRE_THRUST_CALL( copy_n,
                               thrust::make_zip_iterator(thrust::make_tuple(G_diag_J, G_diag_J)),
                               G_diag_nnz,
                               thrust::make_zip_iterator(thrust::make_tuple(Pix_diag_J, Piy_diag_J)) );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, Gy_data, NULL,
                               Pix_diag_data, Piy_diag_data, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
               Piy_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
               Piy_diag_J[i] = G_diag_J[i];
            }

            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               }
         }
      }
      else
      {
         /* dim == 1: only the x component is constructed */
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            HYPRE_THRUST_CALL( copy_n, G_diag_I, G_diag_nrows + 1, Pix_diag_I );
            HYPRE_THRUST_CALL( copy_n, G_diag_J, G_diag_nnz, Pix_diag_J );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);
            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data, Gx_data, NULL, NULL,
                               Pix_diag_data, NULL, NULL );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
            {
               Pix_diag_I[i] = G_diag_I[i];
            }

            for (i = 0; i < G_diag_nnz; i++)
            {
               Pix_diag_J[i] = G_diag_J[i];
            }
for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++) { *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i]; } } } /* Fill-in the off-diagonal part */ if (dim == 3) { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix); HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd); HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd); HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd); hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy); HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd); HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd); HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd); hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz); HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd); HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd); HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix); HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy); HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { if (G_offd_ncols) { HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I, G_offd_I)), G_offd_nrows + 1, thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I, Piz_offd_I)) ); } HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J, G_offd_J)), G_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J, Piz_offd_J)) ); dim3 
bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim, G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, Gz_data, Pix_offd_data, Piy_offd_data, Piz_offd_data ); } else #endif { if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) { Pix_offd_I[i] = G_offd_I[i]; Piy_offd_I[i] = G_offd_I[i]; Piz_offd_I[i] = G_offd_I[i]; } for (i = 0; i < G_offd_nnz; i++) { Pix_offd_J[i] = G_offd_J[i]; Piy_offd_J[i] = G_offd_J[i]; Piz_offd_J[i] = G_offd_J[i]; } for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } } for (i = 0; i < G_offd_ncols; i++) { Pix_cmap[i] = G_cmap[i]; Piy_cmap[i] = G_cmap[i]; Piz_cmap[i] = G_cmap[i]; } } else if (dim == 2) { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix); HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd); HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd); HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd); hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy); HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd); HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd); HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix); HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy); #if 
defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { if (G_offd_ncols) { HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(G_offd_I, G_offd_I)), G_offd_nrows + 1, thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_I, Piy_offd_I)) ); } HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(G_offd_J, G_offd_J)), G_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(Pix_offd_J, Piy_offd_J)) ); dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim, G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, Gy_data, NULL, Pix_offd_data, Piy_offd_data, NULL ); } else #endif { if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) { Pix_offd_I[i] = G_offd_I[i]; Piy_offd_I[i] = G_offd_I[i]; } for (i = 0; i < G_offd_nnz; i++) { Pix_offd_J[i] = G_offd_J[i]; Piy_offd_J[i] = G_offd_J[i]; } for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; } } for (i = 0; i < G_offd_ncols; i++) { Pix_cmap[i] = G_cmap[i]; Piy_cmap[i] = G_cmap[i]; } } else { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix); HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd); HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd); HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *Pix_cmap = 
hypre_ParCSRMatrixColMapOffd(Pix);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      if (exec == HYPRE_EXEC_DEVICE)
      {
         /* Device path: copy G's offd I/J structure into Pix and fill the data
            with |G_ij| * 0.5 * Gx_i (dim == 1, so only the x-component exists) */
         if (G_offd_ncols)
         {
            HYPRE_THRUST_CALL( copy_n, G_offd_I, G_offd_nrows + 1, Pix_offd_I );
         }

         HYPRE_THRUST_CALL( copy_n, G_offd_J, G_offd_nnz, Pix_offd_J );

         dim3 bDim = hypre_GetDefaultCUDABlockDimension();
         dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);

         HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePixyz_copy, gDim, bDim,
                            G_offd_nrows, dim, G_offd_I, G_offd_data, Gx_data, NULL, NULL,
                            Pix_offd_data, NULL, NULL );
      }
      else
#endif
      {
         /* Host path: mirrors the device path above */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
            }
      }

      /* Pix inherits G's off-processor column map unchanged */
      for (i = 0; i < G_offd_ncols; i++)
      {
         Pix_cmap[i] = G_cmap[i];
      }
   }

   *Pix_ptr = Pix;
   if (dim >= 2)
      *Piy_ptr = Piy;
   if (dim == 3)
      *Piz_ptr = Piz;

   return hypre_error_flag;
}

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* Fill the data array of [G,Pi]: one warp per row of G. For each nonzero j of
   row i, writes dim consecutive output entries starting at k = j*dim:
   data_out[k] = G_ij, then data_out[k+d+1] = |G_ij| * 0.5 * G{x,y,z}_data[i]
   for d = 0 .. dim-2.  Only lanes 0-1 load the row bounds and only lanes
   < dim-1 load the vertex coordinates; the values are broadcast to the whole
   warp with __shfl_sync. */
__global__ void
hypreCUDAKernel_AMSComputeGPi_copy2(HYPRE_Int nrows, HYPRE_Int dim, HYPRE_Int *i_in,
                                    HYPRE_Real *data_in, HYPRE_Real *Gx_data, HYPRE_Real *Gy_data,
                                    HYPRE_Real *Gz_data, HYPRE_Real *data_out)
{
   const HYPRE_Int i = hypre_cuda_get_grid_warp_id<1,1>();

   if (i >= nrows)
   {
      return;
   }

   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int j, istart, iend;
   HYPRE_Real t, G[3], *Gdata[3];

   Gdata[0] = Gx_data;
   Gdata[1] = Gy_data;
   Gdata[2] = Gz_data;

   /* lanes 0 and 1 read the row begin/end offsets; broadcast to all lanes */
   if (lane_id < 2)
   {
      j = read_only_load(i_in + i + lane_id);
   }

   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   /* lane d (d < dim-1) reads the d-th coordinate component of vertex i */
   if (lane_id < dim - 1)
   {
      t = read_only_load(Gdata[lane_id] + i);
   }

   for (HYPRE_Int d = 0; d < dim - 1; d++)
   {
      G[d] = __shfl_sync(HYPRE_WARP_FULL_MASK, t, d);
   }

   /* warp-strided sweep over the nonzeros of row i */
   for (j = istart + lane_id; j < iend; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Real u = read_only_load(&data_in[j]);
      const HYPRE_Real v = fabs(u) * 0.5;
      const HYPRE_Int k = j * dim;
      data_out[k] = u;
      for (HYPRE_Int d = 0; d < dim - 1; d++)
      {
         data_out[k+d+1] = v * G[d];
      }
   }
}
#endif

/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
 *
 * Construct the matrix [G,Pi] which can be considered an interpolation
 * matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite elements space.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
                              hypre_ParCSRMatrix *G,
                              hypre_ParVector *Gx,
                              hypre_ParVector *Gy,
                              hypre_ParVector *Gz,
                              HYPRE_Int dim,
                              hypre_ParCSRMatrix **GPi_ptr)
{
   hypre_ParCSRMatrix *GPi;

   /* Take into account G */
   dim++;

   /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      /* GPi has dim columns (and nonzeros) per column (nonzero) of G */
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
      col_starts_size = 2;
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i];

      GPi = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(GPi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;
hypre_ParCSRMatrixInitialize(GPi);

      /* NOTE: dim was incremented above to account for G, so dim >= 3 here
         corresponds to a 2D problem and dim == 4 to a 3D problem */
      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      if (dim >= 3)
         Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 4)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(G),
                                                         hypre_ParCSRMatrixMemoryLocation(GPi) );
#endif

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
         HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
         HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
         HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            /* row pointers of GPi are those of G scaled by dim */
            HYPRE_THRUST_CALL( transform, G_diag_I, G_diag_I + G_diag_nrows + 1, GPi_diag_I, dim * _1 );

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nnz, "thread", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_diag_nnz, dim, G_diag_J, GPi_diag_J );

            gDim = hypre_GetDefaultCUDAGridDimension(G_diag_nrows, "warp", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim,
                               G_diag_nrows, dim, G_diag_I, G_diag_data,
                               Gx_data, Gy_data, Gz_data, GPi_diag_data );
         }
         else
#endif
         {
            for (i = 0; i < G_diag_nrows+1; i++)
               GPi_diag_I[i] = dim * G_diag_I[i];

            /* each nonzero of G expands into dim consecutive columns */
            for (i = 0; i < G_diag_nnz; i++)
               for (d = 0; d < dim; d++)
                  GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

            /* data layout per nonzero: [G_ij, |G_ij|/2*Gx_i, |G_ij|/2*Gy_i, |G_ij|/2*Gz_i] */
            for (i = 0; i < G_diag_nrows; i++)
               for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
               {
                  *GPi_diag_data++ = G_diag_data[j];
                  *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 3)
                     *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
                  if (dim == 4)
                     *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
               }
         }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
         HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
         HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
         HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
         if (exec == HYPRE_EXEC_DEVICE)
         {
            if (G_offd_ncols)
            {
               HYPRE_THRUST_CALL( transform, G_offd_I, G_offd_I + G_offd_nrows + 1, GPi_offd_I, dim * _1 );
            }

            dim3 bDim = hypre_GetDefaultCUDABlockDimension();
            dim3 gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nnz, "thread", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputePi_copy1, gDim, bDim,
                               G_offd_nnz, dim, G_offd_J, GPi_offd_J );

            gDim = hypre_GetDefaultCUDAGridDimension(G_offd_nrows, "warp", bDim);

            HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSComputeGPi_copy2, gDim, bDim,
                               G_offd_nrows, dim, G_offd_I, G_offd_data,
                               Gx_data, Gy_data, Gz_data, GPi_offd_data );
         }
         else
#endif
         {
            /* Host path: mirrors the device path above */
            if (G_offd_ncols)
               for (i = 0; i < G_offd_nrows+1; i++)
                  GPi_offd_I[i] = dim * G_offd_I[i];

            for (i = 0; i < G_offd_nnz; i++)
               for (d = 0; d < dim; d++)
                  GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

            for (i = 0; i < G_offd_nrows; i++)
               for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
               {
                  *GPi_offd_data++ = G_offd_data[j];
                  *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
                  if (dim >= 3)
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
                  if (dim == 4)
                     *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
               }
         }

         /* each off-processor column of G maps to dim consecutive columns of GPi */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
      }
   }

   *GPi_ptr = GPi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetup
 *
 * Construct the AMS solver components.
 *
 * The following functions need to be called before hypre_AMSSetup():
 * - hypre_AMSSetDimension() (if solving a 2D problem)
 * - hypre_AMSSetDiscreteGradient()
 * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors
 *--------------------------------------------------------------------------*/

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* One warp per row of G0^t: zero out all (diag and offd) entries of rows whose
   corresponding interior_nodes flag is not exactly 1.0. */
__global__ void
hypreCUDAKernel_FixInterNodes( HYPRE_Int nrows, HYPRE_Int *G0t_diag_i, HYPRE_Complex *G0t_diag_data,
                               HYPRE_Int *G0t_offd_i, HYPRE_Complex *G0t_offd_data,
                               HYPRE_Real *interior_nodes_data)
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1,1>();

   if (row_i >= nrows)
   {
      return;
   }

   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Int not1 = 0;

   /* lane 0 checks the flag; broadcast the verdict to the whole warp */
   if (lane == 0)
   {
      not1 = read_only_load(&interior_nodes_data[row_i]) != 1.0;
   }

   not1 = __shfl_sync(HYPRE_WARP_FULL_MASK, not1, 0);

   if (!not1)
   {
      return;
   }

   HYPRE_Int p1, q1, p2 = 0, q2 = 0;
   bool nonempty_offd = G0t_offd_data != NULL;

   /* lanes 0-1 read diag (and, if present, offd) row bounds; broadcast both */
   if (lane < 2)
   {
      p1 = read_only_load(G0t_diag_i + row_i + lane);
      if (nonempty_offd)
      {
         p2 = read_only_load(G0t_offd_i + row_i + lane);
      }
   }
   q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1);
   p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0);
   if (nonempty_offd)
   {
      q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1);
      p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0);
   }

   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      G0t_diag_data[j] = 0.0;
   }
   for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
   {
      G0t_offd_data[j] = 0.0;
   }
}

/* One warp per row of G^t: scale each row by h2, the average of
   Gx^2 + Gy^2 + Gz^2 over the row's diag nonzeros (a characteristic
   mesh-size measure for the vertex).  Rows with no diag entries are skipped. */
__global__ void
hypreCUDAKernel_AMSSetupScaleGGt( HYPRE_Int Gt_num_rows,
                                  HYPRE_Int *Gt_diag_i,
                                  HYPRE_Int *Gt_diag_j,
                                  HYPRE_Real *Gt_diag_data,
                                  HYPRE_Int *Gt_offd_i,
                                  HYPRE_Real *Gt_offd_data,
                                  HYPRE_Real *Gx_data,
                                  HYPRE_Real *Gy_data,
                                  HYPRE_Real *Gz_data )
{
   HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1,1>();

   if (row_i >= Gt_num_rows)
   {
      return;
   }

   HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
   HYPRE_Real h2 = 0.0;
   HYPRE_Int ne, p1, q1, p2 = 0, q2 = 0;

   /* lanes 0-1 read the diag row bounds; broadcast to the warp */
   if (lane < 2)
   {
      p1 = read_only_load(Gt_diag_i + row_i + lane);
   }
   q1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 1);
   p1 = __shfl_sync(HYPRE_WARP_FULL_MASK, p1, 0);
   ne = q1 - p1;

   if (ne == 0)
   {
      return;
   }

   if (Gt_offd_data != NULL)
   {
      if (lane < 2)
      {
         p2 = read_only_load(Gt_offd_i + row_i + lane);
      }
      q2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 1);
      p2 = __shfl_sync(HYPRE_WARP_FULL_MASK, p2, 0);
   }

   /* accumulate |grad|^2 over the row's diag nonzeros */
   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int k = read_only_load(&Gt_diag_j[j]);
      const HYPRE_Real Gx = read_only_load(&Gx_data[k]);
      const HYPRE_Real Gy = read_only_load(&Gy_data[k]);
      const HYPRE_Real Gz = read_only_load(&Gz_data[k]);
      h2 += Gx*Gx + Gy*Gy + Gz*Gz;
   }

   h2 = warp_allreduce_sum(h2) / ne;

   for (HYPRE_Int j = p1 + lane; j < q1; j += HYPRE_WARP_SIZE)
   {
      Gt_diag_data[j] *= h2;
   }

   for (HYPRE_Int j = p2 + lane; j < q2; j += HYPRE_WARP_SIZE)
   {
      Gt_offd_data[j] *= h2;
   }
}
#endif

HYPRE_Int hypre_AMSSetup(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
#endif

   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int input_info = 0;

   ams_data -> A = A;

   /* Modifications for problems with zero-conductivity regions */
   if (ams_data -> interior_nodes)
   {
      hypre_ParCSRMatrix *G0t, *Aorig = A;

      /* Make sure that multiple Setup()+Solve() give identical results */
      ams_data -> solve_counter = 0;

      /* Construct the discrete gradient matrix for the zero-conductivity region
         by eliminating the zero-conductivity nodes from G^t.
The range of G0 represents the kernel of A, i.e. the gradients of nodal basis functions supported in zero-conductivity regions. */ hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1); { HYPRE_Int i, j; HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G); hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t); HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td); HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td); hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t); HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to); HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to); HYPRE_Real *interior_nodes_data=hypre_VectorData( hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(nv, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_FixInterNodes, gDim, bDim, nv, G0tdI, G0tdA, G0toI, G0toA, interior_nodes_data ); } else #endif { for (i = 0; i < nv; i++) { if (interior_nodes_data[i] != 1) { for (j = G0tdI[i]; j < G0tdI[i+1]; j++) G0tdA[j] = 0.0; if (G0toI) for (j = G0toI[i]; j < G0toI[i+1]; j++) G0toA[j] = 0.0; } } } } hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1); /* Construct the subspace matrix A_G0 = G0^T G0 */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_G0 = hypre_ParCSRMatMat(G0t, ams_data -> G0); } else #endif { ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0); } hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0); /* Create AMG solver for A_G0 */ HYPRE_BoomerAMGCreate(&ams_data -> B_G0); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25); 
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */ HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3); HYPRE_BoomerAMGSetup(ams_data -> B_G0, (HYPRE_ParCSRMatrix)ams_data -> A_G0, 0, 0); /* Construct the preconditioner for ams_data->A = A + G0 G0^T. NOTE: this can be optimized significantly by taking into account that the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */ { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrix *A; if (exec == HYPRE_EXEC_DEVICE) { A = hypre_ParCSRMatMat(ams_data -> G0, G0t); } else #endif { A = hypre_ParMatmul(ams_data -> G0, G0t); } hypre_ParCSRMatrix *B = Aorig; hypre_ParCSRMatrix **C_ptr = &ams_data -> A; hypre_ParCSRMatrix *C; HYPRE_Real factor, lfactor; /* scale (penalize) G0 G0^T before adding it to the matrix */ { HYPRE_Int i; HYPRE_Int B_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(B)); HYPRE_Real *B_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(B)); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(B)); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(B)); lfactor = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { HYPRE_Int nnz_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int nnz_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); #if defined(HYPRE_DEBUG) HYPRE_Int nnz; hypre_TMemcpy(&nnz, 
&B_diag_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_assert(nnz == nnz_diag); hypre_TMemcpy(&nnz, &B_offd_i[B_num_rows], HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_assert(nnz == nnz_offd); #endif if (nnz_diag) { lfactor = HYPRE_THRUST_CALL( reduce, thrust::make_transform_iterator(B_diag_data, absolute_value<HYPRE_Real>()), thrust::make_transform_iterator(B_diag_data + nnz_diag, absolute_value<HYPRE_Real>()), -1.0, thrust::maximum<HYPRE_Real>() ); } if (nnz_offd) { lfactor = HYPRE_THRUST_CALL( reduce, thrust::make_transform_iterator(B_offd_data, absolute_value<HYPRE_Real>()), thrust::make_transform_iterator(B_offd_data + nnz_offd, absolute_value<HYPRE_Real>()), lfactor, thrust::maximum<HYPRE_Real>() ); } } else #endif { for (i = 0; i < B_diag_i[B_num_rows]; i++) if (fabs(B_diag_data[i]) > lfactor) lfactor = fabs(B_diag_data[i]); for (i = 0; i < B_offd_i[B_num_rows]; i++) if (fabs(B_offd_data[i]) > lfactor) lfactor = fabs(B_offd_data[i]); } lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */ hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); } hypre_ParCSRMatrixAdd(factor, A, 1.0, B, &C); /*hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int 
B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B);*/ /* scale (penalize) G0 G0^T before adding it to the matrix */ /*{ HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local); HYPRE_Real *data = hypre_CSRMatrixData(A_local); HYPRE_Real *dataB = hypre_CSRMatrixData(B_local); HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local); HYPRE_Real factor, lfactor; lfactor = -1; for (i = 0; i < nnzB; i++) if (fabs(dataB[i]) > lfactor) lfactor = fabs(dataB[i]); lfactor *= 1e-10; hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); for (i = 0; i < nnz; i++) data[i] *= factor; } C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local); C_local = hypre_CSRMatrixBigDeleteZeros(C_tmp,0.0); if (C_local) hypre_CSRMatrixDestroy(C_tmp); else C_local = C_tmp; C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 1; hypre_ParCSRMatrixOwnsColStarts(G0t) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); */ hypre_ParCSRMatrixDestroy(A); *C_ptr = C; } hypre_ParCSRMatrixDestroy(G0t); } /* Make sure that the first entry in each row is the diagonal one. 
*/ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */ /* Compute the l1 norm of the rows of A */ if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4) { HYPRE_Real *l1_norm_data = NULL; hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &l1_norm_data); ams_data -> A_l1_norms = hypre_SeqVectorCreate(hypre_ParCSRMatrixNumRows(ams_data -> A)); hypre_VectorData(ams_data -> A_l1_norms) = l1_norm_data; hypre_SeqVectorInitialize_v2(ams_data -> A_l1_norms, hypre_ParCSRMatrixMemoryLocation(ams_data -> A)); } /* Chebyshev? */ if (ams_data -> A_relax_type == 16) { hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10, &ams_data->A_max_eig_est, &ams_data->A_min_eig_est); } /* If not given, compute Gx, Gy and Gz */ { if (ams_data -> x != NULL && (ams_data -> dim == 1 || ams_data -> y != NULL) && (ams_data -> dim <= 2 || ams_data -> z != NULL)) input_info = 1; if (ams_data -> Gx != NULL && (ams_data -> dim == 1 || ams_data -> Gy != NULL) && (ams_data -> dim <= 2 || ams_data -> Gz != NULL)) input_info = 2; if (input_info == 1) { ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx); if (ams_data -> dim >= 2) { ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy); } if (ams_data -> dim == 3) { ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz); } } } if (ams_data -> Pi == NULL && ams_data -> Pix == NULL) { if (ams_data -> cycle_type == 20) /* Construct the combined interpolation matrix [G,Pi] */ hypre_AMSComputeGPi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); else if (ams_data -> cycle_type > 10) /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */ hypre_AMSComputePixyz(ams_data -> A, ams_data -> G, 
ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pix, &ams_data -> Piy, &ams_data -> Piz); else /* Construct the Pi interpolation matrix */ hypre_AMSComputePi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); } /* Keep Gx, Gy and Gz only if use the method with discrete divergence stabilization (where we use them to compute the local mesh size). */ if (input_info == 1 && ams_data -> cycle_type != 9) { hypre_ParVectorDestroy(ams_data -> Gx); if (ams_data -> dim >= 2) hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } /* Create the AMG solver on the range of G^T */ if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20) { HYPRE_BoomerAMGCreate(&ams_data -> B_G); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2); /* If not given, construct the coarse space matrix by RAP */ if (!ams_data -> A_G) { HYPRE_Int G_owned_col_starts; if (!hypre_ParCSRMatrixCommPkg(ams_data -> G)) hypre_MatvecCommPkgCreate(ams_data -> 
G); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_G = hypre_ParCSRMatrixRAPKT(ams_data -> G, ams_data -> A, ams_data -> G, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> G, ams_data -> A, ams_data -> G, &ams_data -> A_G); } /* Make sure that A_G has no zero rows (this can happen if beta is zero in part of the domain). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G); hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts; hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0; ams_data -> owns_A_G = 1; } HYPRE_BoomerAMGSetup(ams_data -> B_G, (HYPRE_ParCSRMatrix)ams_data -> A_G, 0, 0); } if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20) /* Create the AMG solvers on the range of Pi{x,y,z}^T */ { HYPRE_Int P_owned_col_starts; HYPRE_BoomerAMGCreate(&ams_data -> B_Pix); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piy); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels); 
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piz); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2); /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2); } /* Construct the coarse space matrices by RAP */ if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix)) 
hypre_MatvecCommPkgCreate(ams_data -> Pix); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pix = hypre_ParCSRMatrixRAPKT(ams_data -> Pix, ams_data -> A, ams_data -> Pix, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix, ams_data -> A, ams_data -> Pix, &ams_data -> A_Pix); } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0; } /* Make sure that A_Pix has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix); HYPRE_BoomerAMGSetup(ams_data -> B_Pix, (HYPRE_ParCSRMatrix)ams_data -> A_Pix, 0, 0); if (ams_data -> Piy) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy)) hypre_MatvecCommPkgCreate(ams_data -> Piy); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Piy = hypre_ParCSRMatrixRAPKT(ams_data -> Piy, ams_data -> A, ams_data -> Piy, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy, ams_data -> A, ams_data -> Piy, &ams_data -> A_Piy); } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0; } /* Make sure that A_Piy has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy); HYPRE_BoomerAMGSetup(ams_data -> B_Piy, (HYPRE_ParCSRMatrix)ams_data -> A_Piy, 0, 0); } if (ams_data -> Piz) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz)) hypre_MatvecCommPkgCreate(ams_data -> Piz); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Piz = hypre_ParCSRMatrixRAPKT(ams_data -> Piz, ams_data -> A, ams_data -> Piz, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz, ams_data -> A, ams_data -> Piz, &ams_data -> A_Piz); } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0; } /* Make sure that A_Piz has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz); HYPRE_BoomerAMGSetup(ams_data -> B_Piz, (HYPRE_ParCSRMatrix)ams_data -> A_Piz, 0, 0); } } else /* Create the AMG solver on the range of Pi^T */ { HYPRE_BoomerAMGCreate(&ams_data -> B_Pi); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> 
B_Pi, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2); /* If not given, construct the coarse space matrix by RAP and notify BoomerAMG that this is a dim x dim block system. */ if (!ams_data -> A_Pi) { HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi)) hypre_MatvecCommPkgCreate(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); if (ams_data -> cycle_type == 9) { /* Add a discrete divergence term to A before computing Pi^t A Pi */ { hypre_ParCSRMatrix *Gt, *GGt, *ApGGt; hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1); hypre_ParCSRMatrixOwnsColStarts(Gt) = 0; hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0; /* scale GGt by h^2 */ { HYPRE_Real h2; HYPRE_Int i, j, k, ne; hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt); HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag); HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag); HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag); HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag); hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt); HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd); HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd); HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx)); HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy)); HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(Gt_num_rows, "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_AMSSetupScaleGGt, gDim, bDim, Gt_num_rows, Gt_diag_I, Gt_diag_J, Gt_diag_data, Gt_offd_I, Gt_offd_data, Gx_data, Gy_data, Gz_data ); } else #endif { for (i = 0; i < Gt_num_rows; i++) { /* 
determine the characteristic mesh size for vertex i */ h2 = 0.0; ne = 0; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) { k = Gt_diag_J[j]; h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k]; ne++; } if (ne != 0) { h2 /= ne; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) Gt_diag_data[j] *= h2; for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++) Gt_offd_data[j] *= h2; } } } } /* we only needed Gx, Gy and Gz to compute the local mesh size */ if (input_info == 1) { hypre_ParVectorDestroy(ams_data -> Gx); if (ams_data -> dim >= 2) hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { GGt = hypre_ParCSRMatMat(ams_data -> G, Gt); } #endif else { GGt = hypre_ParMatmul(ams_data -> G, Gt); } hypre_ParCSRMatrixDestroy(Gt); /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */ hypre_ParCSRMatrixAdd(1.0, GGt, 1.0, ams_data -> A, &ApGGt); /*{ hypre_ParCSRMatrix *A = GGt; hypre_ParCSRMatrix *B = ams_data -> A; hypre_ParCSRMatrix **C_ptr = &ApGGt; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = 
hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); C_local = hypre_CSRMatrixBigAdd(A_local, B_local); hypre_CSRMatrixBigJtoJ(C_local); C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); *C_ptr = C; }*/ hypre_ParCSRMatrixDestroy(GGt); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ApGGt, ams_data -> Pi, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ApGGt, ams_data -> Pi, &ams_data -> A_Pi); } } } else { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (exec == HYPRE_EXEC_DEVICE) { ams_data -> A_Pi = hypre_ParCSRMatrixRAPKT(ams_data -> Pi, ams_data -> A, ams_data -> Pi, 1); } else #endif { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ams_data -> A, ams_data -> Pi, &ams_data -> A_Pi); } } if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0; } ams_data -> owns_A_Pi = 1; if (ams_data -> cycle_type != 20) HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim); else HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1); /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */ } /* Make sure that A_Pi has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/
   /* Guard against zero rows in A_Pi (possible with contact-type boundary
      conditions), then set up the BoomerAMG solver for the Pi^T A Pi system. */
   hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
   HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
                        (HYPRE_ParCSRMatrix)ams_data -> A_Pi,
                        0, 0);
   }

   /* Allocate temporary vectors */
   ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
   ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
   if (ams_data -> A_G)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
   }
   /* When there is no A_G (beta == 0), reuse r1/g1 sized for A_Pix */
   if (ams_data -> r1 == NULL && ams_data -> A_Pix)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
   }
   if (ams_data -> Pi)
   {
      ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
      ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSolve
 *
 * Solve the system A x = b using the AMS auxiliary-space preconditioner.
 * Iterates up to ams_data->maxit cycles, or until the relative residual
 * drops below ams_data->tol (residuals are only computed when maxit > 1).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, my_id = -1;
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;

   /* cycle is a character string describing the multiplicative/additive
      subspace correction schedule, interpreted by hypre_ParCSRSubspacePrec */
   char cycle[30];
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   HYPRE_Int needZ = 0;

   hypre_ParVector *z = ams_data -> zz;

   /* Subspace tables: slot 0 = gradient space (G), slot 1 = full nodal Pi,
      slots 2..4 = the Pix/Piy/Piz component spaces. */
   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;

   /* Slot 1 (the block Pi system) is solved with the block variant of AMG */
   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;

   /* Temporary vectors per subspace; slots 0 and 2..4 share r1/g1 */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;

   /* may need to create an additional temporary vector for relaxation */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      needZ = ams_data -> A_relax_type == 2 || ams_data -> A_relax_type == 4 || ams_data -> A_relax_type == 16;
   }
   else
#endif
   {
      needZ = hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16;
   }

   if (needZ && !z)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
      ams_data -> zz = z;  /* cached for subsequent solves */
   }

   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);

   /* Compatible subspace projection for problems with zero-conductivity
      regions. Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }

   /* Select the subspace-correction schedule string. Digits name subspaces
      ('0' = fine-grid smoothing), '(' saves a residual, '+' makes the next
      correction additive relative to that saved residual. */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1: case 3: case 5: case 7: default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2: case 4: case 6: case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11: case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1: default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }

   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf(" relative\n");
            hypre_printf(" residual factor residual\n");
            hypre_printf(" -------- ------ --------\n");
            hypre_printf(" Initial %e %e\n", r_norm, relative_resid);
         }
      }

      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms ? hypre_VectorData(ams_data -> A_l1_norms) : NULL,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle, z);

      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf(" Cycle %2d %e %f %e \n", i+1, r_norm, r_norm / old_resid, relative_resid);
      }

      if (relative_resid < ams_data -> tol)
      {
         i++;  /* count the converging cycle before reporting */
         break;
      }
   }

   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));

   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;

   /* Flag non-convergence only when a positive tolerance was requested */
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRSubspacePrec
 *
 * General subspace preconditioner for A0 y = x, based on ParCSR storage.
 *
 * P[i] and A[i] are the interpolation and coarse grid matrices for
 * the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
 * are temporary vectors. A0_* are the fine grid smoothing parameters.
*
 * The default mode is multiplicative, '+' changes the next correction
 * to additive, based on residual computed at '('.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;

   /* Interpret the cycle schedule one character at a time. */
   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
         continue;

      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }

      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }

      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }

      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         /* digits '1'..'5' select subspace index 0..4 */
         HYPRE_Int i = *op - '1';
         if (i < 0)
            hypre_error_in_arg(16);

         /* skip empty subspaces */
         if (!A[i]) continue;

         /* compute the residual? */
         if (use_saved_residual)
         {
            /* additive: restrict the residual saved at '(' */
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            /* multiplicative: restrict the current residual x - A0 y */
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         /* prolong the coarse correction and update the iterate */
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetNumIterations
 *
 * Get the number of AMS iterations.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *num_iterations = ams_data -> num_iterations;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetFinalRelativeResidualNorm
 *
 * Get the final relative residual norm in AMS.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *rel_resid_norm = ams_data -> rel_resid_norm;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSProjectOutGradients
 *
 * For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
 * discrete gradient restricted to the interior nodes of the regions with
 * zero conductivity. This ensures that x is orthogonal to the gradients in
 * the range of G0.
 *
 * This function is typically called after the solution iteration is complete,
 * in order to facilitate the visualization of the computed field.
Without it * the values in the zero-conductivity regions contain kernel components. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSProjectOutGradients(void *solver, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (ams_data -> B_G0) { hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1); hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0); hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1); hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0); hypre_ParVectorAxpy(-1.0, ams_data -> g0, x); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSConstructDiscreteGradient * * Construct and return the lowest-order discrete gradient matrix G, based on: * - a matrix on the egdes (e.g. the stiffness matrix A) * - a vector on the vertices (e.g. the x coordinates) * - the array edge_vertex, which lists the global indexes of the * vertices of the local edges. * * We assume that edge_vertex lists the edge vertices consecutively, * and that the orientation of all edges is consistent. More specificaly: * If edge_orientation = 1, the edges are already oriented. * If edge_orientation = 2, the orientation of edge i depends only on the * sign of edge_vertex[2*i+1] - edge_vertex[2*i]. 
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_BigInt *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;

   /* One row of G per local edge (rows of A are edges) */
   nedges = hypre_ParCSRMatrixNumRows(A);

   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST);
      HYPRE_Int part_size;
      HYPRE_BigInt *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);

      /* Each edge row has exactly two entries: its two end vertices */
      for (i = 0; i <= nedges; i++)
         I[i] = 2*i;

      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i] = -1.0;
            data[i+1] = 1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i] = -1.0;
               data[i+1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i+1] = -1.0;
            }
         }
      }
      else
      {
         /* Invalid edge_orientation argument */
         hypre_error_in_arg(4);
      }

      /* NOTE(review): edge_vertex is installed as the matrix's BigJ array
         and, with OwnsData = 1, will be freed with the matrix below —
         the caller must not free or reuse it. */
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;

      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;

      /* Copy partitioning from A and x_coord (previously they were re-used) */
      part_size = 2;
      row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < part_size; i++)
      {
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }

      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts,
                                   col_starts,
                                   0, 0, 0);
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));

      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }

      /* Free the local matrix */
      hypre_CSRMatrixDestroy(local);
   }

   *G_ptr = G;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEISetup
 *
 * Construct an AMS solver object based on the following data:
 *
 * A              - the edge element stiffness matrix
 * num_vert       - number of vertices (nodes) in the processor
 * num_local_vert - number of vertices owned by the processor
 * vert_number    - global indexes of the vertices in the processor
 * vert_coord     - coordinates of the vertices in the processor
 * num_edges      - number of edges owned by the processor
 * edge_vertex    - the vertices of the edges owned by the processor.
 *                  Vertices are in local numbering (the same as in
 *                  vert_number), and edge orientation is always from
 *                  the first to the second vertex.
 *
 * Here we distinguish between vertices that belong to elements in the
 * current processor, and the subset of these vertices that is owned by
 * the processor.
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_BigInt *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_BigInt *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt *vert_part, num_global_vert;
   HYPRE_BigInt vert_start, vert_end;
   HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert;

   /* Find the processor partitioning of the vertices: an inclusive scan
      gives this rank's upper bound, subtracting the local count gives the
      lower bound; the allreduce gives the global vertex count. */
   vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - big_local_vert;
   hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);

   /* Construct hypre parallel vectors for the vertex coordinates */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = (HYPRE_Int)(vert_number[i] - vert_start);
         /* vert_coord is packed as (x,y,z) triples */
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }

   /* Change vertex numbers from local to global (modifies the caller's
      edge_vertex array in place) */
   for (i = 0; i < 2*num_edges; i++)
      edge_vertex[i] = vert_number[edge_vertex[i]];

   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);

      /* Two entries per edge row */
      for (i = 0; i <= num_edges; i++)
         I[i] = 2*i;

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i] = 1.0;
         data[i+1] = -1.0;
      }

      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixBigJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;

      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* G borrows A's row partitioning but owns the vertex partitioning */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      hypre_CSRMatrixBigJtoJ(local);
      GenerateDiagAndOffd(local, G, vert_start, vert_end);

      //hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   ams_data -> G = G;

   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEIDestroy
 *
 * Free the additional memory allocated in hypre_AMSFEISetup().
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSFEIDestroy(void *solver) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (ams_data -> G) hypre_ParCSRMatrixDestroy(ams_data -> G); if (ams_data -> x) hypre_ParVectorDestroy(ams_data -> x); if (ams_data -> y) hypre_ParVectorDestroy(ams_data -> y); if (ams_data -> z) hypre_ParVectorDestroy(ams_data -> z); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRComputeL1Norms Threads * * Compute the l1 norms of the rows of a given matrix, depending on * the option parameter: * * option 1 = Compute the l1 norm of the rows * option 2 = Compute the l1 norm of the (processor) off-diagonal * part of the rows plus the diagonal of A * option 3 = Compute the l2 norm^2 of the rows * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid * Smoothers for Ultra-Parallel Computing" * * The above computations are done in a CF manner, whenever the provided * cf_marker is not NULL. 
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   /*
    * Computes per-row smoother scalings for A (see header comment for the
    * meaning of option 1-5). Rows are partitioned into num_threads
    * contiguous chunks [ns,ne); options 2 and 4 treat couplings inside the
    * local chunk (other than the diagonal) as "off-diagonal".
    *
    * On success *l1_norm_ptr receives a newly allocated array of num_rows
    * values (ownership transfers to the caller). For options < 5, a zero
    * row scaling raises hypre_error_in_arg(1).
    *
    * BUGFIX vs. previous version: under option 4, `diag` was only assigned
    * when a stored diagonal entry was found for row i; for a row with no
    * (kept) diagonal entry the Remark 6.2 truncation test read an
    * indeterminate or stale value (undefined behavior). `diag` is now reset
    * to 0.0 at the start of each row, which leaves behavior unchanged for
    * rows that do have a diagonal entry.
    */
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, hypre_ParCSRMatrixMemoryLocation(A));
   HYPRE_Int ii, ns, ne, rest, size;

   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
      /* pack the CF markers this rank must send to its neighbors */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      /* contiguous row chunk [ns,ne) for thread k; the first `rest`
         chunks take one extra row */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         /* squared l2 norm of the full row (diag + offd parts) */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            /* BUGFIX: reset per row — previously left indeterminate (UB)
               when row i has no stored diagonal entry */
            diag = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }

            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }
      else if (option == 5) /* stores diagonal of A for Jacobi using matvec, rlx 7 */
      {
         /* Set the diag element (first entry of each diag row in hypre's
            CSR layout); substitute 1.0 for an exact zero */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = A_diag_data[A_diag_I[i]];
            if (l1_norm[i] == 0) l1_norm[i] = 1.0;
         }
      }

      if (option < 5)
      {
         /* Handle negative definite matrices */
         for (i = ns; i < ne; i++)
            if (A_diag_data[A_diag_I[i]] < 0)
               l1_norm[i] = -l1_norm[i];

         for (i = ns; i < ne; i++)
            /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
            if (fabs(l1_norm[i]) == 0.0)
            {
               /* a zero row scaling would break the smoother */
               hypre_error_in_arg(1);
               break;
            }
      }
   }

   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
asaxpy.c
/**
 * @file asaxpy.c
 * @brief Function definition for performing the \c saxpy operation on accelerator.
 *
 * This source file contains function definition for the \c saxpy operation,
 * which is defined as:
 *
 * y := a * x + y
 *
 * where:
 *
 * - a is a scalar.
 * - x and y are single-precision vectors each with n elements.
 *
 * @author Xin Wu (PC²)
 * @date 05.04.2020
 * @copyright CC BY-SA 2.0
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include "wtcalc.h"
#include "asaxpy.h"

/*
 * asaxpy: y := a * x + y on accelerator device 0.
 *
 * ial selects the implementation variant (see the comment at each case).
 * Cases 0-4 use OpenMP target offload; any other value falls through to
 * hipblasSaxpy.  Kernel-only wall time is measured with CLOCK_REALTIME into
 * rt[0]/rt[1] (inside the `target data` region, so host<->device transfers
 * are excluded) and accumulated into the global `wtcalc` when the caller has
 * enabled it by setting wtcalc >= 0.0.
 */
void asaxpy(const int n,
            const float a,
            const float *x,
                  float *y,
            const int ial)
{
  hipblasHandle_t handle;
  float alfa = a,          /* local copy: hipblasSaxpy needs a non-const pointer */
        *x_dev = NULL,
        *y_dev = NULL;
  struct timespec rt[2];   /* rt[0] = kernel start, rt[1] = kernel end */
  int m = (n >> 4);        /* n/16: per-lane trip count for the 16x-unrolled cases 2/3 */

  switch (ial) {
    case 0:
      /*
       * - <<<2^7 , 2^7 >>>, auto scheduling
       */
#pragma omp target data device(0) map(to:a, n, x[0:n]) map(tofrom:y[0:n])
      {
        clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp target teams distribute parallel for device(0) \
        num_teams(128) num_threads(128) dist_schedule(static, 128) shared(a, n, x, y)
        for (int i = 0; i < n; ++i) {
          y[i] = a * x[i] + y[i];
        }
        clock_gettime(CLOCK_REALTIME, rt + 1);
      }
      break;
    case 1:
      /*
       * - <<<2^16, 2^10>>>, manual scheduling
       */
#pragma omp target data device(0) \
      map(to:a, n, x[0:n]) map(tofrom:y[0:n])
      {
        clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp target teams distribute parallel for device(0) \
        num_teams(65536) num_threads(1024) dist_schedule(static, 1024) shared(a, n, x, y)
        for (int i = 0; i < n; ++i) {
          y[i] = a * x[i] + y[i];
        }
        clock_gettime(CLOCK_REALTIME, rt + 1);
      }
      break;
    case 2:
      /*
       * - <<<2^15, 2^7 >>>, manual scheduling, 16x loop unrolling (2^15*2^7*16==2^26)
       *
       * NOTE: each iteration handles 16 elements strided by m = n/16, so only
       * the first 16*m elements are updated; the tail [16*m, n) is left
       * untouched when n is not a multiple of 16.
       */
#pragma omp target data device(0) \
      map(to:a, m, x[0:n]) map(tofrom:y[0:n])
      {
        clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp target teams distribute parallel for device(0) \
        num_teams(65536/2) num_threads(128) dist_schedule(static, 128) shared(a, m, x, y)
        for (int i = 0; i < m; ++i) {
          y[i        ] = a * x[i        ] + y[i        ];
          y[i +     m] = a * x[i +     m] + y[i +     m];
          y[i + 0x2 * m] = a * x[i + 0x2 * m] + y[i + 0x2 * m];
          y[i + 0x3 * m] = a * x[i + 0x3 * m] + y[i + 0x3 * m];
          y[i + 0x4 * m] = a * x[i + 0x4 * m] + y[i + 0x4 * m];
          y[i + 0x5 * m] = a * x[i + 0x5 * m] + y[i + 0x5 * m];
          y[i + 0x6 * m] = a * x[i + 0x6 * m] + y[i + 0x6 * m];
          y[i + 0x7 * m] = a * x[i + 0x7 * m] + y[i + 0x7 * m];
          y[i + 0x8 * m] = a * x[i + 0x8 * m] + y[i + 0x8 * m];
          y[i + 0x9 * m] = a * x[i + 0x9 * m] + y[i + 0x9 * m];
          y[i + 0xa * m] = a * x[i + 0xa * m] + y[i + 0xa * m];
          y[i + 0xb * m] = a * x[i + 0xb * m] + y[i + 0xb * m];
          y[i + 0xc * m] = a * x[i + 0xc * m] + y[i + 0xc * m];
          y[i + 0xd * m] = a * x[i + 0xd * m] + y[i + 0xd * m];
          y[i + 0xe * m] = a * x[i + 0xe * m] + y[i + 0xe * m];
          y[i + 0xf * m] = a * x[i + 0xf * m] + y[i + 0xf * m];
        }
        clock_gettime(CLOCK_REALTIME, rt + 1);
      }
      break;
    case 3:
      /*
       * - <<<2^12, 2^7 >>>, auto scheduling, 16x loop unrolling
       *
       * NOTE: same 16*m coverage caveat as case 2.
       */
#pragma omp target data device(0) \
      map(to:a, m, x[0:n]) map(tofrom:y[0:n])
      {
        clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp target teams distribute parallel for device(0) \
        num_teams(4096) num_threads(128) dist_schedule(static, 128) shared(a, m, x, y)
        for (int i = 0; i < m; ++i) {
          y[i        ] = a * x[i        ] + y[i        ];
          y[i +     m] = a * x[i +     m] + y[i +     m];
          y[i + 0x2 * m] = a * x[i + 0x2 * m] + y[i + 0x2 * m];
          y[i + 0x3 * m] = a * x[i + 0x3 * m] + y[i + 0x3 * m];
          y[i + 0x4 * m] = a * x[i + 0x4 * m] + y[i + 0x4 * m];
          y[i + 0x5 * m] = a * x[i + 0x5 * m] + y[i + 0x5 * m];
          y[i + 0x6 * m] = a * x[i + 0x6 * m] + y[i + 0x6 * m];
          y[i + 0x7 * m] = a * x[i + 0x7 * m] + y[i + 0x7 * m];
          y[i + 0x8 * m] = a * x[i + 0x8 * m] + y[i + 0x8 * m];
          y[i + 0x9 * m] = a * x[i + 0x9 * m] + y[i + 0x9 * m];
          y[i + 0xa * m] = a * x[i + 0xa * m] + y[i + 0xa * m];
          y[i + 0xb * m] = a * x[i + 0xb * m] + y[i + 0xb * m];
          y[i + 0xc * m] = a * x[i + 0xc * m] + y[i + 0xc * m];
          y[i + 0xd * m] = a * x[i + 0xd * m] + y[i + 0xd * m];
          y[i + 0xe * m] = a * x[i + 0xe * m] + y[i + 0xe * m];
          y[i + 0xf * m] = a * x[i + 0xf * m] + y[i + 0xf * m];
        }
        clock_gettime(CLOCK_REALTIME, rt + 1);
      }
      break;
    case 4:
      /*
       * - <<<2^16, 2^9>>>:
       *   * de-linearize the vector (convert the vector to matrix)
       *   * collapse the ji-loop
       *   * 2x i-loop unrolling
       *
       * NOTE(review): the loop bounds hard-code 65536 rows x 1024 columns,
       * i.e. this case assumes n == 2^26 exactly — confirm at the call site
       * (smaller n reads/writes out of bounds, larger n is only partially
       * updated).
       */
#pragma omp target data device(0) \
      map(to:a, x[0:n]) map(tofrom:y[0:n])
      {
        clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp target teams distribute parallel for device(0) \
        num_teams(65536) num_threads(512) dist_schedule(static, 512) \
        collapse(2) shared(a, x, y)
        for (int j = 0; j < 65536; ++j) {
          for (int i = 0; i < 512; ++i) {
            /* 2x i-loop unrolling */
            y[j * 1024 + i      ] += a * x[j * 1024 + i      ];
            y[j * 1024 + i + 512] += a * x[j * 1024 + i + 512];
          }
        }
        clock_gettime(CLOCK_REALTIME, rt + 1);
      }
      break;
    default:
      /*
       * hipblasSaxpy in HIPBLAS: explicit device buffers, timed around the
       * Saxpy call + device sync only (transfers excluded, as in cases 0-4).
       */
      if (HIPBLAS_STATUS_SUCCESS != hipblasCreate(&handle)) {
        printf("error: initialization (HIPBLAS)\n");
        hipblasDestroy(handle);
        exit(EXIT_FAILURE);
      }
      if (hipSuccess != hipMalloc((void **) &x_dev, sizeof(*x) * n) ||
          hipSuccess != hipMalloc((void **) &y_dev, sizeof(*y) * n)) {
        printf("error: memory allocation (HIP)\n");
        hipFree(x_dev);
        hipFree(y_dev);
        hipblasDestroy(handle);
        exit(EXIT_FAILURE);
      }
      if (HIPBLAS_STATUS_SUCCESS != hipblasSetVector(n, sizeof(*x), x, 1, x_dev, 1) ||
          HIPBLAS_STATUS_SUCCESS != hipblasSetVector(n, sizeof(*y), y, 1, y_dev, 1)) {
        printf("error: host --> accl (HIPBLAS)\n");
        hipFree(x_dev);
        hipFree(y_dev);
        hipblasDestroy(handle);
        exit(EXIT_FAILURE);
      }
      clock_gettime(CLOCK_REALTIME, rt + 0);
      if (HIPBLAS_STATUS_SUCCESS != hipblasSaxpy(handle, n, &alfa, x_dev, 1, y_dev, 1)) {
        printf("error: hipblasSaxpy (HIPBLAS)\n");
        hipFree(x_dev);
        hipFree(y_dev);
        hipblasDestroy(handle);
        exit(EXIT_FAILURE);
      }
      if (hipSuccess != hipDeviceSynchronize()) {
        printf("error: device synchronization (HIP)\n");
        hipFree(x_dev);
        hipFree(y_dev);
        hipblasDestroy(handle);
        exit(EXIT_FAILURE);
      }
      clock_gettime(CLOCK_REALTIME, rt + 1);
      if (HIPBLAS_STATUS_SUCCESS != hipblasGetVector(n, sizeof(*y), y_dev, 1, y, 1)) {
        printf("error: accl --> host (HIPBLAS)\n");
        hipFree(x_dev);
        hipFree(y_dev);
        hipblasDestroy(handle);
        exit(EXIT_FAILURE);
      }
      hipFree(x_dev);
      hipFree(y_dev);
      hipblasDestroy(handle);
      break;
  } /* end switch (ial) */
  /* Accumulate kernel-only elapsed seconds when timing is enabled. */
  if (wtcalc >= 0.0) {
    wtcalc += (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
  }
}
imutils.c
/*
 * Author: Curtis McCully
 * October 2014
 * Licensed under a 3-clause BSD style license - see LICENSE.rst
 *
 * Originally written in C++ in 2011
 * See also https://github.com/cmccully/lacosmicx
 *
 * This file contains image utility functions for SCRAPPY. These are the most
 * computationally expensive pieces of the calculation so they have been ported
 * to C.
 *
 * Many thanks to Nicolas Devillard who wrote the optimized methods for finding
 * the median and placed them in the public domain. I have noted in the
 * comments places that use Nicolas Devillard's code.
 *
 * Parallelization has been achieved using OpenMP. Using a compiler that does
 * not support OpenMP, e.g. clang currently, the code should still compile and
 * run serially without issue. I have tried to be explicit as possible about
 * specifying which variables are private and which should be shared, although
 * we never actually have any shared variables. We use firstprivate instead.
 * This does mean that it is important that we never have two threads write to
 * the same memory position at the same time.
 *
 * All calculations are done with 32 bit floats to keep the memory footprint
 * small.
 */
#include<Python.h>
#include "imutils.h"

/* Subsample an array 2x2 given an input array data with size nx x ny. Each
 * pixel is replicated into 4 pixels; no averaging is performed. The results
 * are saved in the output array. The output array should already be allocated
 * as we work on it in place. Data should be striped in the x direction such
 * that the memory location of pixel i,j is data[nx *j + i]. */
void
PySubsample(float* data, float* output, int nx, int ny)
{
    PyDoc_STRVAR(PySubsample__doc__,
        "PySubample(data, output, nx, ny) -> void\n\n"
        "Subsample an array 2x2 given an input array data with size "
        "nx x ny.The results are saved in the output array. The output "
        "array should already be allocated as we work on it in place. Each"
        " pixel is replicated into 4 pixels; no averaging is performed. "
        "Data should be striped in the x direction such that the memory "
        "location of pixel i,j is data[nx *j + i].");

    /* Precalculate the new row length; minor optimization */
    int padnx = 2 * nx;
    /* Loop indices */
    int i, j, nxj, padnxj;

    /* Each output row pair is owned by exactly one j iteration, so the
     * parallel writes never collide. */
#pragma omp parallel for firstprivate(data, output, nx, ny, padnx) \
    private(i, j, nxj, padnxj)
    for (j = 0; j < ny; j++) {
        nxj = nx * j;
        padnxj = 2 * padnx * j;
        for (i = 0; i < nx; i++) {
            /* Copy the pixel value into a 2x2 grid on the output image */
            output[2 * i + padnxj] = data[i + nxj];
            output[2 * i + padnxj + padnx] = data[i + nxj];
            output[2 * i + 1 + padnxj + padnx] = data[i + nxj];
            output[2 * i + 1 + padnxj] = data[i + nxj];
        }
    }
    return;
}

/* Rebin an array 2x2, with size (2 * nx) x (2 * ny). Rebin the array by block
 * averaging 4 pixels back into 1. This is effectively the opposite of
 * subsample (although subsample does not do an average). The results are saved
 * in the output array, which should already be allocated as we work on it in
 * place. Data should be striped in the x direction such that the memory
 * location of pixel i,j is data[nx *j + i]. */
void
PyRebin(float* data, float* output, int nx, int ny)
{
    PyDoc_STRVAR(PyRebin__doc__,
        "PyRebin(data, output, nx, ny) -> void\n \n"
        "Rebin an array 2x2, with size (2 * nx) x (2 * ny). Rebin the "
        "array by block averaging 4 pixels back into 1. This is "
        "effectively the opposite of subsample (although subsample does "
        "not do an average). The results are saved in the output array. "
        "The output array should already be allocated as we work on it in "
        "place. Data should be striped in the x direction such that the "
        "memory location of pixel i,j is data[nx *j + i].");

    /* Size of original (input) array rows */
    int padnx = nx * 2;
    /* Loop variables */
    int i, j, nxj, padnxj;
    /* Block-average accumulator; must be private per thread. */
    float p;

#pragma omp parallel for firstprivate(output, data, nx, ny, padnx) \
    private(i, j, nxj, padnxj, p)
    /* Loop over all of the output pixels */
    for (j = 0; j < ny; j++) {
        nxj = nx * j;
        padnxj = 2 * padnx * j;
        for (i = 0; i < nx; i++) {
            /* Average the 2x2 input block that maps onto output pixel i,j */
            p = data[2 * i + padnxj];
            p += data[2 * i + padnxj + padnx];
            p += data[2 * i + 1 + padnxj + padnx];
            p += data[2 * i + 1 + padnxj];
            p /= 4.0;
            output[i + nxj] = p;
        }
    }
    return;
}

/* Convolve an image of size nx x ny with a kernel of size kernx x kerny. The
 * results are saved in the output array, which should already be allocated as
 * we work on it in place. Data and kernel should both be striped in the x
 * direction such that the memory location of pixel i,j is data[nx *j + i]. */
void
PyConvolve(float* data, float* kernel, float* output, int nx, int ny,
           int kernx, int kerny)
{
    PyDoc_STRVAR(PyConvolve__doc__,
        "PyConvolve(data, kernel, output, nx, ny, kernx, kerny) -> void\n\n"
        "Convolve an image of size nx x ny with a a kernel of size "
        "kernx x kerny. The results are saved in the output array. The "
        "output array should already be allocated as we work on it in "
        "place. Data and kernel should both be striped along the x "
        "direction such that the memory location of pixel i,j is "
        "data[nx *j + i].");

    /* Width of the borders that we will pad with zeros */
    int bnx = (kernx - 1) / 2;
    int bny = (kerny - 1) / 2;
    /* Dimensions of the array including the padded border */
    int padnx = nx + kernx - 1;
    int padny = ny + kerny - 1;
    /* Total number of pixels in the padded array.
     * (NOTE: the unused nx*ny total was removed here.) */
    int padnxny = padnx * padny;

    /* Allocate the padded array */
    float* padarr = (float *) malloc(padnxny * sizeof(float));

    /* Loop variables; all thread private. */
    int i, j;
    int nxj;
    int padnxj;
    /* Inner (kernel) loop variables; also thread private. */
    int k, l;
    int kernxl, padnxl;
    /* Convolution accumulator; thread private. */
    float sum;

    /* Precompute maximum good index in each dimension */
    int xmaxgood = nx + bnx;
    int ymaxgood = ny + bny;

    /* Set the borders of padarr = 0.0 and fill the interior with the input
     * data. */
#pragma omp parallel for \
    firstprivate(padarr, data, nx, padnx, padny, bnx, bny, xmaxgood, ymaxgood) \
    private(nxj, padnxj, i, j)
    for (j = 0; j < padny; j++) {
        padnxj = padnx * j;
        nxj = nx * (j - bny);
        for (i = 0; i < padnx; i++) {
            if (i < bnx || j < bny || j >= ymaxgood || i >= xmaxgood) {
                padarr[padnxj + i] = 0.0;
            }
            else {
                padarr[padnxj + i] = data[nxj + i - bnx];
            }
        }
    }

    /* Calculate the convolution, looping over all output pixels. */
#pragma omp parallel for \
    firstprivate(padarr, output, nx, ny, padnx, bnx, bny, kernx) \
    private(nxj, padnxj, kernxl, padnxl, i, j, k, l, sum)
    for (j = 0; j < ny; j++) {
        nxj = nx * j;
        /* Note the + bny in padnxj */
        padnxj = padnx * (j + bny);
        for (i = 0; i < nx; i++) {
            sum = 0.0;
            /* The sums in the definition of the convolution go from
             * -border width to +border width */
            for (l = -bny; l <= bny; l++) {
                padnxl = padnx * (l + j + bny);
                kernxl = kernx * (-l + bny);
                for (k = -bnx; k <= bnx; k++) {
                    sum += kernel[bnx - k + kernxl] * padarr[padnxl + k + i + bnx];
                }
            }
            output[nxj + i] = sum;
        }
    }

    free(padarr);
    return;
}

/* Convolve an image of size nx x ny with the kernel:
 *  0 -1  0
 * -1  4 -1
 *  0 -1  0
 * The results are saved in the output array, which should already be
 * allocated as we work on it in place. This is a discrete version of the
 * Laplacian operator. Data should be striped in the x direction such that the
 * memory location of pixel i,j is data[nx *j + i]. */
void
PyLaplaceConvolve(float* data, float* output, int nx, int ny)
{
    PyDoc_STRVAR(PyLaplaceConvolve__doc__,
        "PyLaplaceConvolve(data, output, nx, ny) -> void\n\n"
        "Convolve an image of size nx x ny the following kernel:\n"
        " 0 -1 0\n"
        "-1 4 -1\n"
        " 0 -1 0\n"
        "This is a discrete version of the Laplacian operator. The results"
        " are saved in the output array. The output array should already "
        "be allocated as we work on it in place.Data should be striped in "
        "the x direction such that the memory location of pixel i,j is "
        "data[nx *j + i].");

    /* Total number of pixels in the image */
    int nxny = nx * ny;
    /* Loop variables */
    int i, j, nxj;
    /* Accumulator; must be private per thread. */
    float p;

    /* Because we know the form of the kernel, we can short circuit the
     * convolution and calculate the results directly with nested loops.
     * Interior pixels first; edges and corners are handled explicitly
     * below. */
#pragma omp parallel for firstprivate(nx, ny, output, data) \
    private(i, j, nxj, p)
    for (j = 1; j < ny - 1; j++) {
        nxj = nx * j;
        for (i = 1; i < nx - 1; i++) {
            p = 4.0 * data[nxj + i];
            p -= data[i + 1 + nxj];
            p -= data[i - 1 + nxj];
            p -= data[i + nxj + nx];
            p -= data[i + nxj - nx];
            output[nxj + i] = p;
        }
    }

    /* Top and bottom rows (corners excluded).
     * FIX: this was "#pragma omp parallel" without "for" (every thread ran
     * the whole loop) and "p" was missing from the private list, which is a
     * data race on p between threads. */
#pragma omp parallel for firstprivate(output, data, nx, nxny) private(i, p)
    for (i = 1; i < nx - 1; i++) {
        output[i] = 4.0 * data[i] - data[i + 1] - data[i - 1] - data[i + nx];
        p = 4.0 * data[i + nxny - nx];
        p -= data[i + 1 + nxny - nx];
        p -= data[i + nxny - nx - 1];
        p -= data[i - nx + nxny - nx];
        output[i + nxny - nx] = p;
    }

    /* First and last columns (corners excluded).
     * FIX: same missing "for" / non-private "p" as above. */
#pragma omp parallel for firstprivate(output, data, nx, ny) private(j, nxj, p)
    for (j = 1; j < ny - 1; j++) {
        nxj = nx * j;
        p = 4.0 * data[nxj];
        p -= data[nxj + 1];
        p -= data[nxj + nx];
        p -= data[nxj - nx];
        output[nxj] = p;
        p = 4.0 * data[nxj + nx - 1];
        p -= data[nxj + nx - 2];
        p -= data[nxj + nx + nx - 1];
        p -= data[nxj - 1];
        output[nxj + nx - 1] = p;
    }

    /* The four corners, done serially at the very end. */
    /* Bottom Left Corner */
    output[0] = 4.0 * data[0] - data[1] - data[nx];
    /* Bottom Right Corner */
    output[nx - 1] = 4.0 * data[nx - 1] - data[nx - 2] - data[nx + nx - 1];
    /* Top Left Corner */
    p = 4.0 * data[nxny - nx];
    p -= data[nxny - nx + 1];
    p -= data[nxny - nx - nx];
    output[nxny - nx] = p;
    /* Top Right Corner */
    p = 4.0 * data[nxny - 1];
    p -= data[nxny - 2];
    p -= data[nxny - 1 - nx];
    output[nxny - 1] = p;

    return;
}

/* Perform a boolean dilation on an array of size nx x ny with a 3x3 kernel of
 * all ones. Dilation is the boolean equivalent of a convolution using logical
 * ors instead of a sum. The dilation is not computed for a 1 pixel border
 * around the image; those pixels are copied from the input data. The results
 * are saved in the output array, which should already be allocated. Data
 * should be striped along the x direction such that the memory location of
 * pixel i,j is data[i + nx * j]. */
void
PyDilate3(bool* data, bool* output, int nx, int ny)
{
    PyDoc_STRVAR(PyDilate3__doc__,
        "PyDilate3(data, output, nx, ny) -> void\n\n"
        "Perform a boolean dilation on an array of size nx x ny. The "
        "results are saved in the output array which should already be "
        "allocated as we work on it in place. "
        "Dilation is the boolean equivalent of a convolution but using "
        "logical or instead of a sum. We apply a 3x3 kernel of all ones. "
        "Dilation is not computed for a 1 pixel border which is copied "
        "from the input data. Data should be striped along the x-axis "
        "such that the location of pixel i,j is data[i + nx * j].");

    /* Total number of pixels; minor optimization */
    int nxny = nx * ny;
    /* Loop variables */
    int i, j, nxj;
    /* Boolean accumulator; must be private per thread. */
    bool p;

#pragma omp parallel for firstprivate(output, data, nxny, nx, ny) \
    private(i, j, nxj, p)
    /* Loop through all of the pixels excluding the border */
    for (j = 1; j < ny - 1; j++) {
        nxj = nx * j;
        for (i = 1; i < nx - 1; i++) {
            /* Start in the middle and work out */
            p = data[i + nxj];
            /* Right 1 */
            p = p || data[i + 1 + nxj];
            /* Left 1 */
            p = p || data[i - 1 + nxj];
            /* Up 1 */
            p = p || data[i + nx + nxj];
            /* Down 1 */
            p = p || data[i - nx + nxj];
            /* Up 1 Right 1 */
            p = p || data[i + 1 + nx + nxj];
            /* Up 1 Left 1 */
            p = p || data[i - 1 + nx + nxj];
            /* Down 1 Right 1 */
            p = p || data[i + 1 - nx + nxj];
            /* Down 1 Left 1 */
            p = p || data[i - 1 - nx + nxj];
            output[i + nxj] = p;
        }
    }

    /* For the borders, copy the data from the input array.
     * FIX: these two directives were "#pragma omp parallel" without "for",
     * so every thread redundantly executed the full loop. */
#pragma omp parallel for firstprivate(output, data, nx, nxny) private(i)
    for (i = 0; i < nx; i++) {
        output[i] = data[i];
        output[nxny - nx + i] = data[nxny - nx + i];
    }
#pragma omp parallel for firstprivate(output, data, nx, ny) private(j, nxj)
    for (j = 0; j < ny; j++) {
        nxj = nx * j;
        output[nxj] = data[nxj];
        output[nxj - 1 + nx] = data[nxj - 1 + nx];
    }
    return;
}

/* Do niter iterations of boolean dilation on an array of size nx x ny with
 * the kernel:
 * 0 1 1 1 0
 * 1 1 1 1 1
 * 1 1 1 1 1
 * 1 1 1 1 1
 * 0 1 1 1 0
 * The edges are padded with zeros so that the dilation operator is defined
 * for all pixels. The results are saved in the output array, which should
 * already be allocated. Data should be striped along the x direction such
 * that the memory location of pixel i,j is data[i + nx * j]. */
void
PyDilate5(bool* data, bool* output, int niter, int nx, int ny)
{
    PyDoc_STRVAR(PyDilate5__doc__,
        "PyDilate5(data, output, nx, ny) -> void\n\n"
        "Do niter iterations of boolean dilation on an array of size "
        "nx x ny. The results are saved in the output array. The output "
        "array should already be allocated as we work on it in place. "
        "Dilation is the boolean equivalent of a convolution but using "
        "logical ors instead of a sum. We apply the following kernel:\n"
        "0 1 1 1 0\n"
        "1 1 1 1 1\n"
        "1 1 1 1 1\n"
        "1 1 1 1 1\n"
        "0 1 1 1 0\n"
        "Data should be striped along the x direction such that the "
        "location of pixel i,j is data[i + nx * j].");

    /* Pad the array with a 2 pixel border of zeros on each side */
    int padnx = nx + 4;
    int padny = ny + 4;
    /* Total pixel counts; minor optimization */
    int padnxny = padnx * padny;
    int nxny = nx * ny;

    /* The padded scratch array to work on */
    bool* padarr = (bool *) malloc(padnxny * sizeof(bool));

    /* Loop indices */
    int i, j, nxj, padnxj;
    int iter;
    /* Boolean accumulator; must be private per thread. */
    bool p;

    /* Initialize the borders of the padded array to zero.
     * FIX: the next three directives were "#pragma omp parallel" without
     * "for", so every thread redundantly executed the full loop. */
#pragma omp parallel for firstprivate(padarr, padnx, padnxny) private(i)
    for (i = 0; i < padnx; i++) {
        padarr[i] = false;
        padarr[i + padnx] = false;
        padarr[padnxny - padnx + i] = false;
        padarr[padnxny - padnx - padnx + i] = false;
    }
#pragma omp parallel for firstprivate(padarr, padnx, padny) private(j, padnxj)
    for (j = 0; j < padny; j++) {
        padnxj = padnx * j;
        padarr[padnxj] = false;
        padarr[padnxj + 1] = false;
        padarr[padnxj + padnx - 1] = false;
        padarr[padnxj + padnx - 2] = false;
    }
    /* Initialize the output array to the input data */
#pragma omp parallel for firstprivate(output, data, nxny) private(i)
    for (i = 0; i < nxny; i++) {
        output[i] = data[i];
    }

    /* Outer iteration loop */
    for (iter = 0; iter < niter; iter++) {
        /* Copy the latest output into the interior of the padded array */
#pragma omp parallel for firstprivate(padarr, output, nx, ny, padnx, iter) \
        private(nxj, padnxj, i, j)
        for (j = 0; j < ny; j++) {
            padnxj = padnx * j;
            nxj = nx * j;
            for (i = 0; i < nx; i++) {
                padarr[i + 2 + padnx + padnx + padnxj] = output[i + nxj];
            }
        }

        /* Loop over all pixels, or-ing together the 21-pixel neighborhood */
#pragma omp parallel for firstprivate(padarr, output, nx, ny, padnx, iter) \
        private(nxj, padnxj, i, j, p)
        for (j = 0; j < ny; j++) {
            nxj = nx * j;
            /* Note the + 2 padding in padnxj */
            padnxj = padnx * (j + 2);
            for (i = 0; i < nx; i++) {
                /* Start with the middle pixel and work out */
                p = padarr[i + 2 + padnxj];
                /* Right 1 */
                p = p || padarr[i + 3 + padnxj];
                /* Left 1 */
                p = p || padarr[i + 1 + padnxj];
                /* Up 1 */
                p = p || padarr[i + 2 + padnx + padnxj];
                /* Down 1 */
                p = p || padarr[i + 2 - padnx + padnxj];
                /* Up 1 Right 1 */
                p = p || padarr[i + 3 + padnx + padnxj];
                /* Up 1 Left 1 */
                p = p || padarr[i + 1 + padnx + padnxj];
                /* Down 1 Right 1 */
                p = p || padarr[i + 3 - padnx + padnxj];
                /* Down 1 Left 1 */
                p = p || padarr[i + 1 - padnx + padnxj];
                /* Right 2 */
                p = p || padarr[i + 4 + padnxj];
                /* Left 2 */
                p = p || padarr[i + padnxj];
                /* Up 2 */
                p = p || padarr[i + 2 + padnx + padnx + padnxj];
                /* Down 2 */
                p = p || padarr[i + 2 - padnx - padnx + padnxj];
                /* Right 2 Up 1 */
                p = p || padarr[i + 4 + padnx + padnxj];
                /* Right 2 Down 1 */
                p = p || padarr[i + 4 - padnx + padnxj];
                /* Left 2 Up 1 */
                p = p || padarr[i + padnx + padnxj];
                /* Left 2 Down 1 */
                p = p || padarr[i - padnx + padnxj];
                /* Up 2 Right 1 */
                p = p || padarr[i + 3 + padnx + padnx + padnxj];
                /* Up 2 Left 1 */
                p = p || padarr[i + 1 + padnx + padnx + padnxj];
                /* Down 2 Right 1 */
                p = p || padarr[i + 3 - padnx - padnx + padnxj];
                /* Down 2 Left 1 */
                p = p || padarr[i + 1 - padnx - padnx + padnxj];
                output[i + nxj] = p;
            }
        }
    }

    free(padarr);
    return;
}
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. 
%
%  The format of the FloodfillPaintImage method is:
%
%      MagickBooleanType FloodfillPaintImage(Image *image,
%        const DrawInfo *draw_info,const PixelInfo target,
%        const ssize_t x_offset,const ssize_t y_offset,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o target: the RGB value of the target color.
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
  const ssize_t y_offset,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
/* Maximum number of pending scanline segments on the explicit stack. */
#define MaxStacksize  524288UL
/* Push a scanline segment (row `up`, columns `left`..`right`, direction
   `delta`) onto the segment stack; silently skips segments whose target row
   up+delta falls outside the image, throws on stack overflow. */
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  Image
    *floodplane_image;

  MagickBooleanType
    skip,
    status;

  MemoryInfo
    *segment_info;

  PixelInfo
    fill_color,
    pixel;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if ((image->alpha_trait == UndefinedPixelTrait) &&
      (draw_info->fill.alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
  /*
    Set floodfill state: the gray "floodplane" image is a mask, initially all
    black, in which flooded pixels are marked QuantumRange.
  */
  floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  floodplane_image->alpha_trait=UndefinedPixelTrait;
  floodplane_image->colorspace=GRAYColorspace;
  (void) QueryColorCompliance("#000",AllCompliance,
    &floodplane_image->background_color,exception);
  (void) SetImageBackgroundColor(floodplane_image,exception);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack (the seed scanline in both directions).
  */
  status=MagickTrue;
  start=0;
  s=segment_stack;
  PushSegmentStack(y_offset,x_offset,x_offset,1);
  PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
  GetPixelInfo(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  /*
    Scanline flood fill: pop a segment, sweep left then right marking
    matching pixels in the floodplane, and push the neighbor rows.
  */
  while (s > segment_stack)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: first scan leftward from x1.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    p+=x1*GetPixelChannels(image);
    q+=x1*GetPixelChannels(floodplane_image);
    for (x=x1; x >= 0; x--)
    {
      if (GetPixelGray(floodplane_image,q) != 0)
        break;
      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
        break;
      SetPixelGray(floodplane_image,QuantumRange,q);
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(floodplane_image);
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip: no pixel at or left of x1 matched; continue rightward only. */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              /*
                Scan rightward from x, marking matching pixels.
              */
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
                x,1,exception);
              if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelGray(floodplane_image,q) != 0)
                  break;
                GetPixelInfoPixel(image,p,&pixel);
                if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
                  break;
                SetPixelGray(floodplane_image,QuantumRange,q);
                p+=GetPixelChannels(image);
                q+=GetPixelChannels(floodplane_image);
              }
              status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
              if (status == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Skip over non-matching pixels up to x2 to find the next run.
          */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (GetPixelGray(floodplane_image,q) != 0)
              break;
            GetPixelInfoPixel(image,p,&pixel);
            if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
              break;
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(floodplane_image);
          }
        }
      start=x;
    } while (x <= x2);
  }
  status=MagickTrue;
  /*
    Tile the fill color onto every pixel the floodplane marked.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(floodplane_image,image,floodplane_image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Tile fill color onto floodplane.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelGray(floodplane_image,p) != 0)
        {
          GetFillColor(draw_info,x,y,&fill_color,exception);
          SetPixelViaPixelInfo(image,&fill_color,q);
        }
      p+=GetPixelChannels(floodplane_image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies a continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transistion.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelInfo *start_color,
%        const PixelInfo *stop_color,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread meathod: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%    o exception: return any errors or warnings in this structure.
% */
/*
  GradientImage(): paint a linear or radial gradient over the whole image.
  The gradient geometry is derived from image artifacts
  ("gradient:direction", "gradient:angle", "gradient:vector",
  "gradient:center", "gradient:extent", "gradient:radii") and the supplied
  stop list, then rendered via DrawGradientImage().

  Fixes vs. previous revision:
    - SouthGravity used image->columns for the vertical end point (y2);
      it must be image->rows, matching every other gravity case.
    - draw_info was leaked when the stop array could not be allocated.
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,const StopInfo *stops,
  const size_t number_stops,ExceptionInfo *exception)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(stops != (const StopInfo *) NULL);
  assert(number_stops > 0);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /*
    Default vector: top-left corner to bottom-right corner.
  */
  gradient->gradient_vector.x2=(double) image->columns-1.0;
  gradient->gradient_vector.y2=(double) image->rows-1.0;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1.0;
          gradient->gradient_vector.y1=(double) image->rows-1.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1.0;
          gradient->gradient_vector.x2=(double) image->columns-1.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1.0;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Fix: a south-pointing vector ends at the bottom row; this
            previously used image->columns.
          */
          gradient->gradient_vector.y2=(double) image->rows-1.0;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1.0;
          gradient->gradient_vector.y2=(double) image->rows-1.0;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    With no explicit geometry, a linear gradient defaults to top-to-bottom.
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) image->columns*cosine)+
        fabs((double) image->rows*sine);
      gradient->gradient_vector.x1=0.5*(image->columns-distance*cosine);
      gradient->gradient_vector.y1=0.5*(image->rows-distance*sine);
      gradient->gradient_vector.x2=0.5*(image->columns+distance*cosine);
      gradient->gradient_vector.y2=0.5*(image->rows+distance*sine);
    }
  gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) MagickMax(image->columns,image->rows)/
            2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt(image->columns*image->columns+
            image->rows*image->rows))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) image->columns/2.0;
          gradient->radii.y=(double) image->rows/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax(image->columns,image->rows)/
            2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) (MagickMin(image->columns,image->rows))/
            2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=number_stops;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /*
        Fix: release draw_info before throwing so the OOM path does not leak.
      */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) CopyMagickMemory(gradient->stops,stops,(size_t) number_stops*
    sizeof(*stops));
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **histograms, width; ssize_t center, y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register size_t *histogram; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color. 
*/ k=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel=GetPixelChannelChannel(linear_image,i); PixelTrait traits=GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if (((paint_traits & CopyPixelTrait) != 0) || (GetPixelReadMask(linear_image,p) == 0)) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() 
changes any pixel that matches color with the color
%  defined by fill argument.
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.  For example,
%  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
%  are now interpreted as the same color.
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
%        const PixelInfo *fill,const MagickBooleanType invert,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Per-pixel writes below require DirectClass storage. */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Adapt fill and target to this image's colorspace/channel layout. */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /* invert==MagickTrue flips the test: paint NON-matching pixels. */
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        SetPixelViaPixelInfo(image,&conform_fill,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImage)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.  For example,
%  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
%  are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const PixelInfo *target,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Per-pixel writes below require DirectClass storage. */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Ensure the image has an alpha channel to write the new opacity into. */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /* invert==MagickTrue flips the test: paint NON-matching pixels. */
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for the all the channels, TransparentPaintImage()
%  is not suitable for the operations like chroma, where the tolerance for
%  similarity of two color component (RGB) can be different.
Thus we define
%  this method to take two target pixels (one low and one high) and all the
%  pixels of an image which are lying between these two pixels are made
%  transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Sanity checks: a valid image and both chroma bounds are required.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Walk every row; pixels whose RGB components all fall inside the
    [low,high] box (or outside it, when invert is set) get the new opacity.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    register Quantum
      *magick_restrict pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,pixels,&pixel);
      /*
        Inclusive per-channel range test; guards short-circuit on the
        first component that falls outside the box.
      */
      match=MagickFalse;
      if ((pixel.red >= low->red) && (pixel.red <= high->red))
        if ((pixel.green >= low->green) && (pixel.green <= high->green))
          if ((pixel.blue >= low->blue) && (pixel.blue <= high->blue))
            match=MagickTrue;
      if (match != invert)
        SetPixelAlpha(image,opacity,pixels);
      pixels+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal
  * Get/set the process-wide thread-count setting.  With SetAction, *v is
  * stored; with GetAction, *v receives the stored value, or (when unset and
  * OpenMP is available) omp_get_max_threads(), or 1 without OpenMP.
  * NOTE(review): m_maxThreads is a plain static with no synchronization —
  * presumably callers set it before spawning threads; confirm before relying
  * on concurrent SetAction calls. */
inline void manage_multi_threading(Action action, int* v)
{
  static int m_maxThreads = -1;
  EIGEN_UNUSED_VARIABLE(m_maxThreads);

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads.
  * Forces the lazily-initialized thread-count and cache-size globals to be
  * initialized up front, avoiding races on their first use. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

/** \internal Per-thread bookkeeping for a parallel GEMM: which slice of the
  * lhs this thread packs (lhs_start/lhs_length) plus volatile sync/users
  * fields used by the product kernels to coordinate across threads. */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

/** \internal Run func over the rows x cols product, either serially or split
  * across OpenMP threads.  transpose selects whether the row or the column
  * dimension is partitioned (for row-major destinations). */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  // Exceptions thrown inside an OpenMP region must not escape it; count
  // failures and rethrow once, after the region ends.
  int errorCount = 0;
  #pragma omp parallel num_threads(threads) reduction(+: errorCount)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of requested ones.
    Index actual_threads = omp_get_num_threads();

    // Column block size rounded down to a multiple of 4; row block size
    // rounded to a multiple of the kernel's mr register-blocking factor.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    EIGEN_TRY {
      if(transpose) func(c0, actualBlockCols, 0, rows, info);
      else          func(0, rows, c0, actualBlockCols, info);
    } EIGEN_CATCH(...) {
      ++errorCount;
    }
  }
  if (errorCount) EIGEN_THROW_X(Eigen::eigen_assert_exception());
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
FillInLinearSystemImpl.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #include "open3d/t/geometry/kernel/GeometryIndexer.h" #include "open3d/t/pipelines/kernel/FillInLinearSystem.h" #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) #include "open3d/t/pipelines/kernel/SVD3x3CUDA.cuh" #else #include "open3d/t/pipelines/kernel/SVD3x3CPU.h" #endif namespace open3d { namespace t { namespace pipelines { namespace kernel { #if defined(__CUDACC__) void FillInRigidAlignmentTermCUDA #else void FillInRigidAlignmentTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &Ti_ps, const core::Tensor &Tj_qs, const core::Tensor &Ri_normal_ps, int i, int j, float threshold) { core::Device device = AtA.GetDevice(); int64_t n = Ti_ps.GetLength(); if (Tj_qs.GetLength() != n || Ri_normal_ps.GetLength() != n) { utility::LogError( "Unable to setup linear system: input length mismatch."); } // First fill in a small 12 x 12 linear system core::Tensor AtA_local = core::Tensor::Zeros({12, 12}, core::Dtype::Float32, device); core::Tensor Atb_local = core::Tensor::Zeros({12}, core::Dtype::Float32, device); float *AtA_local_ptr = static_cast<float *>(AtA_local.GetDataPtr()); float *Atb_local_ptr = static_cast<float *>(Atb_local.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); const float *Ti_ps_ptr = static_cast<const float *>(Ti_ps.GetDataPtr()); const float *Tj_qs_ptr = static_cast<const float *>(Tj_qs.GetDataPtr()); const float *Ri_normal_ps_ptr = static_cast<const float *>(Ri_normal_ps.GetDataPtr()); #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { const float *p_prime = Ti_ps_ptr + 3 * workload_idx; const float *q_prime = Tj_qs_ptr + 3 * workload_idx; const float *normal_p_prime = Ri_normal_ps_ptr + 3 * workload_idx; float r = (p_prime[0] - q_prime[0]) * normal_p_prime[0] 
+ (p_prime[1] - q_prime[1]) * normal_p_prime[1] + (p_prime[2] - q_prime[2]) * normal_p_prime[2]; if (abs(r) > threshold) return; float J_ij[12]; J_ij[0] = -q_prime[2] * normal_p_prime[1] + q_prime[1] * normal_p_prime[2]; J_ij[1] = q_prime[2] * normal_p_prime[0] - q_prime[0] * normal_p_prime[2]; J_ij[2] = -q_prime[1] * normal_p_prime[0] + q_prime[0] * normal_p_prime[1]; J_ij[3] = normal_p_prime[0]; J_ij[4] = normal_p_prime[1]; J_ij[5] = normal_p_prime[2]; for (int k = 0; k < 6; ++k) { J_ij[k + 6] = -J_ij[k]; } // Not optimized; Switch to reduction if necessary. #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) for (int i_local = 0; i_local < 12; ++i_local) { for (int j_local = 0; j_local < 12; ++j_local) { atomicAdd(&AtA_local_ptr[i_local * 12 + j_local], J_ij[i_local] * J_ij[j_local]); } atomicAdd(&Atb_local_ptr[i_local], J_ij[i_local] * r); } atomicAdd(residual_ptr, r * r); #else #pragma omp critical { for (int i_local = 0; i_local < 12; ++i_local) { for (int j_local = 0; j_local < 12; ++j_local) { AtA_local_ptr[i_local * 12 + j_local] += J_ij[i_local] * J_ij[j_local]; } Atb_local_ptr[i_local] += J_ij[i_local] * r; } *residual_ptr += r * r; } #endif }); // Then fill-in the large linear system std::vector<int64_t> indices_vec(12); for (int k = 0; k < 6; ++k) { indices_vec[k] = i * 6 + k; indices_vec[k + 6] = j * 6 + k; } std::vector<int64_t> indices_i_vec; std::vector<int64_t> indices_j_vec; for (int local_i = 0; local_i < 12; ++local_i) { for (int local_j = 0; local_j < 12; ++local_j) { indices_i_vec.push_back(indices_vec[local_i]); indices_j_vec.push_back(indices_vec[local_j]); } } core::Tensor indices(indices_vec, {12}, core::Dtype::Int64, device); core::Tensor indices_i(indices_i_vec, {12 * 12}, core::Dtype::Int64, device); core::Tensor indices_j(indices_j_vec, {12 * 12}, core::Dtype::Int64, device); core::Tensor AtA_sub = AtA.IndexGet({indices_i, indices_j}); AtA.IndexSet({indices_i, indices_j}, AtA_sub + AtA_local.View({12 * 12})); core::Tensor Atb_sub = 
Atb.IndexGet({indices}); Atb.IndexSet({indices}, Atb_sub + Atb_local.View({12, 1})); } #if defined(__CUDACC__) void FillInSLACAlignmentTermCUDA #else void FillInSLACAlignmentTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &Ti_Cps, const core::Tensor &Tj_Cqs, const core::Tensor &Cnormal_ps, const core::Tensor &Ri_Cnormal_ps, const core::Tensor &RjT_Ri_Cnormal_ps, const core::Tensor &cgrid_idx_ps, const core::Tensor &cgrid_idx_qs, const core::Tensor &cgrid_ratio_qs, const core::Tensor &cgrid_ratio_ps, int i, int j, int n_frags, float threshold) { int64_t n = Ti_Cps.GetLength(); if (Tj_Cqs.GetLength() != n || Cnormal_ps.GetLength() != n || Ri_Cnormal_ps.GetLength() != n || RjT_Ri_Cnormal_ps.GetLength() != n || cgrid_idx_ps.GetLength() != n || cgrid_ratio_ps.GetLength() != n || cgrid_idx_qs.GetLength() != n || cgrid_ratio_qs.GetLength() != n) { utility::LogError( "Unable to setup linear system: input length mismatch."); } int n_vars = Atb.GetLength(); float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr()); float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); // Geometric properties const float *Ti_Cps_ptr = static_cast<const float *>(Ti_Cps.GetDataPtr()); const float *Tj_Cqs_ptr = static_cast<const float *>(Tj_Cqs.GetDataPtr()); const float *Cnormal_ps_ptr = static_cast<const float *>(Cnormal_ps.GetDataPtr()); const float *Ri_Cnormal_ps_ptr = static_cast<const float *>(Ri_Cnormal_ps.GetDataPtr()); const float *RjT_Ri_Cnormal_ps_ptr = static_cast<const float *>(RjT_Ri_Cnormal_ps.GetDataPtr()); // Association properties const int *cgrid_idx_ps_ptr = static_cast<const int *>(cgrid_idx_ps.GetDataPtr()); const int *cgrid_idx_qs_ptr = static_cast<const int *>(cgrid_idx_qs.GetDataPtr()); const float *cgrid_ratio_ps_ptr = static_cast<const float *>(cgrid_ratio_ps.GetDataPtr()); const float *cgrid_ratio_qs_ptr = static_cast<const float 
*>(cgrid_ratio_qs.GetDataPtr()); #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { const float *Ti_Cp = Ti_Cps_ptr + 3 * workload_idx; const float *Tj_Cq = Tj_Cqs_ptr + 3 * workload_idx; const float *Cnormal_p = Cnormal_ps_ptr + 3 * workload_idx; const float *Ri_Cnormal_p = Ri_Cnormal_ps_ptr + 3 * workload_idx; const float *RjTRi_Cnormal_p = RjT_Ri_Cnormal_ps_ptr + 3 * workload_idx; const int *cgrid_idx_p = cgrid_idx_ps_ptr + 8 * workload_idx; const int *cgrid_idx_q = cgrid_idx_qs_ptr + 8 * workload_idx; const float *cgrid_ratio_p = cgrid_ratio_ps_ptr + 8 * workload_idx; const float *cgrid_ratio_q = cgrid_ratio_qs_ptr + 8 * workload_idx; float r = (Ti_Cp[0] - Tj_Cq[0]) * Ri_Cnormal_p[0] + (Ti_Cp[1] - Tj_Cq[1]) * Ri_Cnormal_p[1] + (Ti_Cp[2] - Tj_Cq[2]) * Ri_Cnormal_p[2]; if (abs(r) > threshold) return; // Now we fill in a 60 x 60 sub-matrix: 2 x (6 + 8 x 3) float J[60]; int idx[60]; // Jacobian w.r.t. Ti: 0-6 J[0] = -Tj_Cq[2] * Ri_Cnormal_p[1] + Tj_Cq[1] * Ri_Cnormal_p[2]; J[1] = Tj_Cq[2] * Ri_Cnormal_p[0] - Tj_Cq[0] * Ri_Cnormal_p[2]; J[2] = -Tj_Cq[1] * Ri_Cnormal_p[0] + Tj_Cq[0] * Ri_Cnormal_p[1]; J[3] = Ri_Cnormal_p[0]; J[4] = Ri_Cnormal_p[1]; J[5] = Ri_Cnormal_p[2]; // Jacobian w.r.t. Tj: 6-12 for (int k = 0; k < 6; ++k) { J[k + 6] = -J[k]; idx[k + 0] = 6 * i + k; idx[k + 6] = 6 * j + k; } // Jacobian w.r.t. C over p: 12-36 for (int k = 0; k < 8; ++k) { J[12 + k * 3 + 0] = cgrid_ratio_p[k] * Cnormal_p[0]; J[12 + k * 3 + 1] = cgrid_ratio_p[k] * Cnormal_p[1]; J[12 + k * 3 + 2] = cgrid_ratio_p[k] * Cnormal_p[2]; idx[12 + k * 3 + 0] = 6 * n_frags + cgrid_idx_p[k] * 3 + 0; idx[12 + k * 3 + 1] = 6 * n_frags + cgrid_idx_p[k] * 3 + 1; idx[12 + k * 3 + 2] = 6 * n_frags + cgrid_idx_p[k] * 3 + 2; } // Jacobian w.r.t. 
C over q: 36-60 for (int k = 0; k < 8; ++k) { J[36 + k * 3 + 0] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[0]; J[36 + k * 3 + 1] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[1]; J[36 + k * 3 + 2] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[2]; idx[36 + k * 3 + 0] = 6 * n_frags + cgrid_idx_q[k] * 3 + 0; idx[36 + k * 3 + 1] = 6 * n_frags + cgrid_idx_q[k] * 3 + 1; idx[36 + k * 3 + 2] = 6 * n_frags + cgrid_idx_q[k] * 3 + 2; } // Not optimized; Switch to reduction if necessary. #if defined(__CUDACC__) for (int ki = 0; ki < 60; ++ki) { for (int kj = 0; kj < 60; ++kj) { float AtA_ij = J[ki] * J[kj]; int ij = idx[ki] * n_vars + idx[kj]; atomicAdd(AtA_ptr + ij, AtA_ij); } float Atb_i = J[ki] * r; atomicAdd(Atb_ptr + idx[ki], Atb_i); } atomicAdd(residual_ptr, r * r); #else #pragma omp critical { for (int ki = 0; ki < 60; ++ki) { for (int kj = 0; kj < 60; ++kj) { AtA_ptr[idx[ki] * n_vars + idx[kj]] += J[ki] * J[kj]; } Atb_ptr[idx[ki]] += J[ki] * r; } *residual_ptr += r * r; } #endif }); } inline OPEN3D_HOST_DEVICE void matmul3x3_3x1(float m00, float m01, float m02, float m10, float m11, float m12, float m20, float m21, float m22, float v0, float v1, float v2, float &o0, float &o1, float &o2) { o0 = m00 * v0 + m01 * v1 + m02 * v2; o1 = m10 * v0 + m11 * v1 + m12 * v2; o2 = m20 * v0 + m21 * v1 + m22 * v2; } inline OPEN3D_HOST_DEVICE void matmul3x3_3x3(float a00, float a01, float a02, float a10, float a11, float a12, float a20, float a21, float a22, float b00, float b01, float b02, float b10, float b11, float b12, float b20, float b21, float b22, float &c00, float &c01, float &c02, float &c10, float &c11, float &c12, float &c20, float &c21, float &c22) { matmul3x3_3x1(a00, a01, a02, a10, a11, a12, a20, a21, a22, b00, b10, b20, c00, c10, c20); matmul3x3_3x1(a00, a01, a02, a10, a11, a12, a20, a21, a22, b01, b11, b21, c01, c11, c21); matmul3x3_3x1(a00, a01, a02, a10, a11, a12, a20, a21, a22, b02, b12, b22, c02, c12, c22); } inline OPEN3D_HOST_DEVICE float det3x3(float m00, float m01, float m02, float 
m10, float m11, float m12, float m20, float m21, float m22) { return m00 * (m11 * m22 - m12 * m21) - m10 * (m01 * m22 - m02 - m21) + m20 * (m01 * m12 - m02 * m11); } #if defined(__CUDACC__) void FillInSLACRegularizerTermCUDA #else void FillInSLACRegularizerTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &grid_idx, const core::Tensor &grid_nbs_idx, const core::Tensor &grid_nbs_mask, const core::Tensor &positions_init, const core::Tensor &positions_curr, float weight, int n_frags, int anchor_idx) { int64_t n = grid_idx.GetLength(); int64_t n_vars = Atb.GetLength(); float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr()); float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); const int *grid_idx_ptr = static_cast<const int *>(grid_idx.GetDataPtr()); const int *grid_nbs_idx_ptr = static_cast<const int *>(grid_nbs_idx.GetDataPtr()); const bool *grid_nbs_mask_ptr = static_cast<const bool *>(grid_nbs_mask.GetDataPtr()); const float *positions_init_ptr = static_cast<const float *>(positions_init.GetDataPtr()); const float *positions_curr_ptr = static_cast<const float *>(positions_curr.GetDataPtr()); #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { // Enumerate 6 neighbors int idx_i = grid_idx_ptr[workload_idx]; const int *idx_nbs = grid_nbs_idx_ptr + 6 * workload_idx; const bool *mask_nbs = grid_nbs_mask_ptr + 6 * workload_idx; // Build a 3x3 linear system to compute the local R float cov[3][3] = {{0}}; float U[3][3], V[3][3], S[3]; int cnt = 0; for (int k = 0; k < 6; ++k) { bool mask_k = mask_nbs[k]; if (!mask_k) continue; int idx_k = idx_nbs[k]; // Now build linear systems float diff_ik_init[3] = {positions_init_ptr[idx_i * 3 + 0] - positions_init_ptr[idx_k * 3 + 0], positions_init_ptr[idx_i * 3 + 1] - 
positions_init_ptr[idx_k * 3 + 1], positions_init_ptr[idx_i * 3 + 2] - positions_init_ptr[idx_k * 3 + 2]}; float diff_ik_curr[3] = {positions_curr_ptr[idx_i * 3 + 0] - positions_curr_ptr[idx_k * 3 + 0], positions_curr_ptr[idx_i * 3 + 1] - positions_curr_ptr[idx_k * 3 + 1], positions_curr_ptr[idx_i * 3 + 2] - positions_curr_ptr[idx_k * 3 + 2]}; // Build linear system by computing XY^T when formulating Y = RX // Y: curr // X: init for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] += diff_ik_init[i] * diff_ik_curr[j]; } } ++cnt; } if (cnt < 3) { return; } // clang-format off svd(cov[0][0], cov[0][1], cov[0][2], cov[1][0], cov[1][1], cov[1][2], cov[2][0], cov[2][1], cov[2][2], U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2], S[0], S[1], S[2], V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]); // TODO: det3x3 and matmul3x3 float R[3][3]; // clang-format off matmul3x3_3x3(V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2], U[0][0], U[1][0], U[2][0], U[0][1], U[1][1], U[2][1], U[0][2], U[1][2], U[2][2], R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2]); float d = det3x3(R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2]); // clang-format on if (d < 0) { // clang-format off matmul3x3_3x3(V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2], U[0][0], U[1][0], U[2][0], U[0][1], U[1][1], U[2][1], -U[0][2], -U[1][2], -U[2][2], R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2]); // clang-format on } // Now we have R, we build Hessian and residuals // But first, we need to anchor a point if (idx_i == anchor_idx) { R[0][0] = R[1][1] = R[2][2] = 1; R[0][1] = R[0][2] = R[1][0] = R[1][2] = R[2][0] = R[2][1] = 0; } for (int k = 0; k < 6; ++k) { bool mask_k = mask_nbs[k]; if (mask_k) { int idx_k = idx_nbs[k]; float diff_ik_init[3] = { positions_init_ptr[idx_i 
* 3 + 0] - positions_init_ptr[idx_k * 3 + 0], positions_init_ptr[idx_i * 3 + 1] - positions_init_ptr[idx_k * 3 + 1], positions_init_ptr[idx_i * 3 + 2] - positions_init_ptr[idx_k * 3 + 2]}; float diff_ik_curr[3] = { positions_curr_ptr[idx_i * 3 + 0] - positions_curr_ptr[idx_k * 3 + 0], positions_curr_ptr[idx_i * 3 + 1] - positions_curr_ptr[idx_k * 3 + 1], positions_curr_ptr[idx_i * 3 + 2] - positions_curr_ptr[idx_k * 3 + 2]}; float R_diff_ik_curr[3]; // clang-format off matmul3x3_3x1(R[0][0], R[0][1], R[0][2], R[1][0], R[1][1], R[1][2], R[2][0], R[2][1], R[2][2], diff_ik_init[0], diff_ik_init[1], diff_ik_init[2], R_diff_ik_curr[0], R_diff_ik_curr[1], R_diff_ik_curr[2]); // clang-format on float local_r[3]; local_r[0] = diff_ik_curr[0] - R_diff_ik_curr[0]; local_r[1] = diff_ik_curr[1] - R_diff_ik_curr[1]; local_r[2] = diff_ik_curr[2] - R_diff_ik_curr[2]; int offset_idx_i = 3 * idx_i + 6 * n_frags; int offset_idx_k = 3 * idx_k + 6 * n_frags; #if defined(__CUDACC__) // Update residual atomicAdd(residual_ptr, weight * (local_r[0] * local_r[0] + local_r[1] * local_r[1] + local_r[2] * local_r[2])); for (int axis = 0; axis < 3; ++axis) { // Update AtA: 2x2 atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_i + axis], weight); atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_k + axis], weight); atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_k + axis], -weight); atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_i + axis], -weight); // Update Atb: 2x1 atomicAdd(&Atb_ptr[offset_idx_i + axis], +weight * local_r[axis]); atomicAdd(&Atb_ptr[offset_idx_k + axis], -weight * local_r[axis]); } #else #pragma omp critical { // Update residual *residual_ptr += weight * (local_r[0] * local_r[0] + local_r[1] * local_r[1] + local_r[2] * local_r[2]); for (int axis = 0; axis < 3; ++axis) { // Update AtA: 2x2 AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_i + axis] += weight; AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_k + 
axis] += weight; AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_k + axis] -= weight; AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_i + axis] -= weight; // Update Atb: 2x1 Atb_ptr[offset_idx_i + axis] += weight * local_r[axis]; Atb_ptr[offset_idx_k + axis] -= weight * local_r[axis]; } } #endif } } }); } } // namespace kernel } // namespace pipelines } // namespace t } // namespace open3d
NeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_NG_H_ #define _SPTAG_COMMON_NG_H_ #include "../VectorIndex.h" #include "CommonUtils.h" #include "Dataset.h" #include "FineGrainedLock.h" #include "QueryResultSet.h" namespace SPTAG { namespace COMMON { class NeighborhoodGraph { public: NeighborhoodGraph(): m_iTPTNumber(32), m_iTPTLeafSize(2000), m_iSamples(1000), m_numTopDimensionTPTSplit(5), m_iNeighborhoodSize(32), m_iNeighborhoodScale(2), m_iCEFScale(2), m_iRefineIter(2), m_iCEF(1000), m_iMaxCheckForRefineGraph(10000) { m_pNeighborhoodGraph.SetName("Graph"); } ~NeighborhoodGraph() {} virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0; virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0; virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) = 0; template <typename T> void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { std::cout << "build RNG graph!" << std::endl; m_iGraphSize = index->GetNumSamples(); m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale; m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize); if (m_iGraphSize < 1000) { RefineGraph<T>(index, idmap); std::cout << "Build RNG Graph end!" 
<< std::endl; return; } { COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize); std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize)); std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>()); for (SizeType i = 0; i < m_iGraphSize; i++) for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) (NeighborhoodDists)[i][j] = MaxDist; std::cout << "Parallel TpTree Partition begin " << std::endl; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < m_iTPTNumber; i++) { Sleep(i * 100); std::srand(clock()); for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j; std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end()); PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]); std::cout << "Finish Getting Leaves for Tree " << i << std::endl; } std::cout << "Parallel TpTree Partition done" << std::endl; for (int i = 0; i < m_iTPTNumber; i++) { #pragma omp parallel for schedule(dynamic) for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++) { SizeType start_index = TptreeLeafNodes[i][j].first; SizeType end_index = TptreeLeafNodes[i][j].second; if (omp_get_thread_num() == 0) std::cout << "\rProcessing Tree " << i << ' ' << j * 100 / TptreeLeafNodes[i].size() << '%'; for (SizeType x = start_index; x < end_index; x++) { for (SizeType y = x + 1; y <= end_index; y++) { SizeType p1 = TptreeDataIndices[i][x]; SizeType p2 = TptreeDataIndices[i][y]; float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2)); if (idmap != nullptr) { p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1); p2 = (idmap->find(p2) == idmap->end()) ? 
p2 : idmap->at(p2); } COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize); COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize); } } } TptreeDataIndices[i].clear(); TptreeLeafNodes[i].clear(); std::cout << std::endl; } TptreeDataIndices.clear(); TptreeLeafNodes.clear(); } if (m_iMaxCheckForRefineGraph > 0) { RefineGraph<T>(index, idmap); } } template <typename T> void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { m_iCEF *= m_iCEFScale; for (int iter = 0; iter < m_iRefineIter - 1; iter++) { #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false, false); if (i % 1000 == 0) std::cout << "\rRefine " << iter << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%"; } std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl; } m_iCEF /= m_iCEFScale; m_iNeighborhoodSize /= m_iNeighborhoodScale; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < m_iGraphSize; i++) { RefineNode<T>(index, i, false, false); if (i % 1000 == 0) std::cout << "\rRefine " << (m_iRefineIter - 1) << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%"; } std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl; if (idmap != nullptr) { for (auto iter = idmap->begin(); iter != idmap->end(); iter++) if (iter->first < 0) { m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second; } } } template <typename T> ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices, std::ostream* output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { SizeType R = (SizeType)indices.size(); if (newGraph != nullptr) { 
newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize); newGraph->m_iGraphSize = R; newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize; } #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < R; i++) { RefineNode<T>(index, indices[i], false, false); SizeType *nodes, *outnodes; nodes = outnodes = m_pNeighborhoodGraph[indices[i]]; if (newGraph != nullptr) outnodes = newGraph->m_pNeighborhoodGraph[i]; std::unordered_map<SizeType, SizeType>::const_iterator iter; for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if (nodes[j] >= 0 && nodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[nodes[j]]; if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second; } if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end()) outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second; } if (output != nullptr) { output->write((char*)&R, sizeof(SizeType)); output->write((char*)&m_iNeighborhoodSize, sizeof(DimensionType)); for (SizeType i = 0; i < R; i++) { output->write((char*)m_pNeighborhoodGraph[indices[i]], sizeof(SizeType) * m_iNeighborhoodSize); } std::cout << "Save Refine " << m_pNeighborhoodGraph.Name() << " (" << R << ", " << m_iNeighborhoodSize << ") Finish!" 
<< std::endl; } return ErrorCode::Success; } template <typename T> void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted) { COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), m_iCEF + 1); index->SearchIndex(query, searchDeleted); RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), m_iCEF + 1); if (updateNeighbors) { // update neighbors for (int j = 0; j <= m_iCEF; j++) { BasicResult* item = query.GetResult(j); if (item->VID < 0) break; if (item->VID == node) continue; InsertNeighbors(index, item->VID, node, item->Dist); } } } template <typename T> void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, std::vector<std::pair<SizeType, SizeType>> & leaves) { if (last - first <= m_iTPTLeafSize) { leaves.push_back(std::make_pair(first, last)); } else { std::vector<float> Mean(index->GetFeatureDim(), 0); int iIteration = 100; SizeType end = min(first + m_iSamples, last); SizeType count = end - first + 1; // calculate the mean of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { Mean[k] += v[k]; } } for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { Mean[k] /= count; } std::vector<BasicResult> Variance; Variance.reserve(index->GetFeatureDim()); for (DimensionType j = 0; j < index->GetFeatureDim(); j++) { Variance.push_back(BasicResult(j, 0)); } // calculate the variance of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)index->GetSample(indices[j]); for (DimensionType k = 0; k < index->GetFeatureDim(); k++) { float dist = v[k] - Mean[k]; Variance[k].Dist += dist*dist; } } std::sort(Variance.begin(), Variance.end(), COMMON::Compare); std::vector<SizeType> indexs(m_numTopDimensionTPTSplit); std::vector<float> weight(m_numTopDimensionTPTSplit), 
bestweight(m_numTopDimensionTPTSplit); float bestvariance = Variance[index->GetFeatureDim() - 1].Dist; for (int i = 0; i < m_numTopDimensionTPTSplit; i++) { indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID; bestweight[i] = 0; } bestweight[0] = 1; float bestmean = Mean[indexs[0]]; std::vector<float> Val(count); for (int i = 0; i < iIteration; i++) { float sumweight = 0; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] = float(rand() % 10000) / 5000.0f - 1.0f; sumweight += weight[j] * weight[j]; } sumweight = sqrt(sumweight); for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { weight[j] /= sumweight; } float mean = 0; for (SizeType j = 0; j < count; j++) { Val[j] = 0; const T* v = (const T*)index->GetSample(indices[first + j]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { Val[j] += weight[k] * v[indexs[k]]; } mean += Val[j]; } mean /= count; float var = 0; for (SizeType j = 0; j < count; j++) { float dist = Val[j] - mean; var += dist * dist; } if (var > bestvariance) { bestvariance = var; bestmean = mean; for (int j = 0; j < m_numTopDimensionTPTSplit; j++) { bestweight[j] = weight[j]; } } } SizeType i = first; SizeType j = last; // decide which child one point belongs while (i <= j) { float val = 0; const T* v = (const T*)index->GetSample(indices[i]); for (int k = 0; k < m_numTopDimensionTPTSplit; k++) { val += bestweight[k] * v[indexs[k]]; } if (val < bestmean) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } Mean.clear(); Variance.clear(); Val.clear(); indexs.clear(); weight.clear(); bestweight.clear(); PartitionByTptree<T>(index, indices, first, i - 1, leaves); PartitionByTptree<T>(index, indices, i, last, leaves); } } inline std::uint64_t BufferSize() const { return m_pNeighborhoodGraph.BufferSize(); } bool LoadGraph(std::string sGraphFilename) { if 
(!m_pNeighborhoodGraph.Load(sGraphFilename)) return false; m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return true; } bool LoadGraph(char* pGraphMemFile) { m_pNeighborhoodGraph.Load(pGraphMemFile); m_iGraphSize = m_pNeighborhoodGraph.R(); m_iNeighborhoodSize = m_pNeighborhoodGraph.C(); return true; } bool SaveGraph(std::string sGraphFilename) const { std::cout << "Save " << m_pNeighborhoodGraph.Name() << " To " << sGraphFilename << std::endl; std::ofstream output(sGraphFilename, std::ios::binary); if (!output.is_open()) return false; SaveGraph(output); output.close(); return true; } bool SaveGraph(std::ostream& output) const { output.write((char*)&m_iGraphSize, sizeof(SizeType)); output.write((char*)&m_iNeighborhoodSize, sizeof(DimensionType)); for (SizeType i = 0; i < m_iGraphSize; i++) output.write((char*)m_pNeighborhoodGraph[i], sizeof(SizeType) * m_iNeighborhoodSize); std::cout << "Save " << m_pNeighborhoodGraph.Name() << " (" << m_iGraphSize << ", " << m_iNeighborhoodSize << ") Finish!" 
<< std::endl; return true; } inline ErrorCode AddBatch(SizeType num) { ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num); if (ret != ErrorCode::Success) return ret; m_iGraphSize += num; return ErrorCode::Success; } inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; } inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; } void Update(SizeType row, DimensionType col, SizeType val) { std::lock_guard<std::mutex> lock(m_dataUpdateLock); m_pNeighborhoodGraph[row][col] = val; } inline void SetR(SizeType rows) { m_pNeighborhoodGraph.SetR(rows); m_iGraphSize = rows; } inline SizeType R() const { return m_iGraphSize; } static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type); protected: // Graph structure SizeType m_iGraphSize; COMMON::Dataset<SizeType> m_pNeighborhoodGraph; std::mutex m_dataUpdateLock; public: int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit; DimensionType m_iNeighborhoodSize; int m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iMaxCheckForRefineGraph; }; } } #endif
SE_fgg_expand_all_mex.c
#include "mex.h"
#include "SE_fgg.h"

/* Unpack Ewald/FGG parameters from the MATLAB options struct into `params`.
 * Defined in the accompanying SE_fgg MEX glue code. */
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);

/* Aliases for the MEX input/output argument slots. */
#define X   prhs[0]
#define OPT prhs[1]

#define ZX  plhs[0] // Output
#define ZY  plhs[1] // Output
#define ZZ  plhs[2] // Output
#define IDX plhs[3] // Output

#ifndef VERBOSE
#define VERBOSE 0
#endif

/* MEX gateway: [ZX, ZY, ZZ, IDX] = SE_fgg_expand_all_mex(X, OPT)
 *
 * Precomputes the per-particle Gaussian gridding (FGG) expansion vectors
 * for all N particles in X (one N-row coordinate matrix). For each particle
 * it fills a P-element expansion per dimension (ZX/ZY/ZZ, each P x N) and
 * the particle's grid index (IDX, N x 1, int32). The actual work is done by
 * SE_FGG_expand_all inside an OpenMP parallel region.
 */
void mexFunction(int nlhs,       mxArray *plhs[],
		 int nrhs, const mxArray *prhs[] )
{
    // N = number of particles (rows of X)
    const int N = mxGetM(X);
    double* restrict x = mxGetPr(X);

    // pack parameters
    SE_FGG_params params;
    SE_FGG_MEX_params(&params, OPT, N);

    // allocate output arrays (P x N expansion per dimension)
    ZX = mxCreateDoubleMatrix(params.P,N,mxREAL);
    ZY = mxCreateDoubleMatrix(params.P,N,mxREAL);
    ZZ = mxCreateDoubleMatrix(params.P,N,mxREAL);
    // grid-index output: N x 1 int32 vector
    const size_t dims[2] = {N,1};
    IDX = mxCreateNumericArray(2,dims,mxINT32_CLASS,mxREAL);

    // wrap in SE_work struct
    SE_FGG_work work;
    work.zx = mxGetPr(ZX);
    work.zy = mxGetPr(ZY);
    work.zz = mxGetPr(ZZ);
    work.idx = (int*)mxGetData(IDX);

    // coordinates and charges (charges unused by the expansion step)
    const SE_state st = {.x = x, .q = NULL};

    if(VERBOSE)
	mexPrintf("[SE%s FG(E)] N=%d, P=%d\n",PER_STR,N,params.P);

#ifdef _OPENMP
#pragma omp parallel default(shared)
#endif
    {
	// now do the work (compiled for 2P or 3P periodicity)
	SE_FGG_expand_all(&work, &st, &params);
    }
    // done
}
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/ASTContext.h" #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// Representation of an OpenMP canonical loop. /// /// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form /// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form /// OpenMP 4.0, section 2.6 Canonical Loop Form /// OpenMP 4.5, section 2.6 Canonical Loop Form /// OpenMP 5.0, section 2.9.1 Canonical Loop Form /// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form /// /// An OpenMP canonical loop is a for-statement or range-based for-statement /// with additional requirements that ensure that the number of iterations is /// known before entering the loop and allow skipping to an arbitrary iteration. 
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is
/// known to fulfill OpenMP's canonical loop requirements because of being
/// associated to an OMPLoopBasedDirective. That is, the general structure is:
///
/// OMPLoopBasedDirective
/// [`- CapturedStmt   ]
/// [  `- CapturedDecl]
///     ` OMPCanonicalLoop
///       `- ForStmt/CXXForRangeStmt
///          `- Stmt
///
/// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some
/// directives such as OMPParallelForDirective, but others do not need them
/// (such as OMPTileDirective). The OMPCanonicalLoop and
/// ForStmt/CXXForRangeStmt pair is repeated for each loop associated with the
/// directive. An OMPCanonicalLoop must not appear in the AST unless associated
/// with an OMPLoopBasedDirective. In an imperfectly nested loop nest, the
/// OMPCanonicalLoop may also be wrapped in a CompoundStmt:
///
/// [...]
///     ` OMPCanonicalLoop
///       `- ForStmt/CXXForRangeStmt
///          `- CompoundStmt
///             |- Leading in-between code (if any)
///             |- OMPCanonicalLoop
///             |  `- ForStmt/CXXForRangeStmt
///             |     `- ...
///             `- Trailing in-between code (if any)
///
/// The leading/trailing in-between code must not itself be a OMPCanonicalLoop
/// to avoid confusion which loop belongs to the nesting.
///
/// There are three different kinds of iteration variables for different
/// purposes:
/// * Loop user variable: The user-accessible variable with different value for
///   each iteration.
/// * Loop iteration variable: The variable used to identify a loop iteration;
///   for range-based for-statement, this is the hidden iterator '__begin'. For
///   other loops, it is identical to the loop user variable. Must be a
///   random-access iterator, pointer or integer type.
/// * Logical iteration counter: Normalized loop counter starting at 0 and
///   incrementing by one at each iteration.
Allows abstracting over the type /// of the loop iteration variable and is always an unsigned integer type /// appropriate to represent the range of the loop iteration variable. Its /// value corresponds to the logical iteration number in the OpenMP /// specification. /// /// This AST node provides two captured statements: /// * The distance function which computes the number of iterations. /// * The loop user variable function that computes the loop user variable when /// given a logical iteration number. /// /// These captured statements provide the link between C/C++ semantics and the /// logical iteration counters used by the OpenMPIRBuilder which is /// language-agnostic and therefore does not know e.g. how to advance a /// random-access iterator. The OpenMPIRBuilder will use this information to /// apply simd, workshare-loop, distribute, taskloop and loop directives to the /// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an /// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an /// OMPLoopDirective and skipped when searching for the associated syntactical /// loop. /// /// Example: /// <code> /// std::vector<std::string> Container{1,2,3}; /// for (std::string Str : Container) /// Body(Str); /// </code> /// which is syntactic sugar for approximately: /// <code> /// auto &&__range = Container; /// auto __begin = std::begin(__range); /// auto __end = std::end(__range); /// for (; __begin != __end; ++__begin) { /// std::String Str = *__begin; /// Body(Str); /// } /// </code> /// In this example, the loop user variable is `Str`, the loop iteration /// variable is `__begin` of type `std::vector<std::string>::iterator` and the /// logical iteration number type is `size_t` (unsigned version of /// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`). 
/// Therefore, the distance function will be /// <code> /// [&](size_t &Result) { Result = __end - __begin; } /// </code> /// and the loop variable function is /// <code> /// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) { /// Result = __begin + Logical; /// } /// </code> /// The variable `__begin`, aka the loop iteration variable, is captured by /// value because it is modified in the loop body, but both functions require /// the initial value. The OpenMP specification explicitly leaves unspecified /// when the loop expressions are evaluated such that a capture by reference is /// sufficient. class OMPCanonicalLoop : public Stmt { friend class ASTStmtReader; friend class ASTStmtWriter; /// Children of this AST node. enum { LOOP_STMT, DISTANCE_FUNC, LOOPVAR_FUNC, LOOPVAR_REF, LastSubStmt = LOOPVAR_REF }; private: /// This AST node's children. Stmt *SubStmts[LastSubStmt + 1] = {}; OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {} public: /// Create a new OMPCanonicalLoop. static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt, CapturedStmt *DistanceFunc, CapturedStmt *LoopVarFunc, DeclRefExpr *LoopVarRef) { OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop(); S->setLoopStmt(LoopStmt); S->setDistanceFunc(DistanceFunc); S->setLoopVarFunc(LoopVarFunc); S->setLoopVarRef(LoopVarRef); return S; } /// Create an empty OMPCanonicalLoop for deserialization. static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) { return new (Ctx) OMPCanonicalLoop(); } static bool classof(const Stmt *S) { return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass; } SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); } SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); } /// Return this AST node's children. 
/// @{ child_range children() { return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1); } /// @} /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt). /// @{ Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; } const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; } void setLoopStmt(Stmt *S) { assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) && "Canonical loop must be a for loop (range-based or otherwise)"); SubStmts[LOOP_STMT] = S; } /// @} /// The function that computes the number of loop iterations. Can be evaluated /// before entering the loop but after the syntactical loop's init /// statement(s). /// /// Function signature: void(LogicalTy &Result) /// Any values necessary to compute the distance are captures of the closure. /// @{ CapturedStmt *getDistanceFunc() { return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]); } const CapturedStmt *getDistanceFunc() const { return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]); } void setDistanceFunc(CapturedStmt *S) { assert(S && "Expected non-null captured statement"); SubStmts[DISTANCE_FUNC] = S; } /// @} /// The function that computes the loop user variable from a logical iteration /// counter. Can be evaluated as first statement in the loop. /// /// Function signature: void(LoopVarTy &Result, LogicalTy Number) /// Any other values required to compute the loop user variable (such as start /// value, step size) are captured by the closure. In particular, the initial /// value of loop iteration variable is captured by value to be unaffected by /// previous iterations. 
/// @{ CapturedStmt *getLoopVarFunc() { return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]); } const CapturedStmt *getLoopVarFunc() const { return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]); } void setLoopVarFunc(CapturedStmt *S) { assert(S && "Expected non-null captured statement"); SubStmts[LOOPVAR_FUNC] = S; } /// @} /// Reference to the loop user variable as accessed in the loop body. /// @{ DeclRefExpr *getLoopVarRef() { return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]); } const DeclRefExpr *getLoopVarRef() const { return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]); } void setLoopVarRef(DeclRefExpr *E) { assert(E && "Expected non-null loop variable"); SubStmts[LOOPVAR_REF] = E; } /// @} }; /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; friend class ASTStmtWriter; /// Kind of the directive. OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { if (!Data) return llvm::None; return Data->getClauses(); } protected: /// Data, associated with the directive. OMPChildren *Data = nullptr; /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)) {} template <typename T, typename... Params> static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, unsigned NumChildren, Params &&... 
P) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses, AssociatedStmt, NumChildren); auto *Inst = new (Mem) T(std::forward<Params>(P)...); Inst->Data = Data; return Inst; } template <typename T, typename... Params> static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses, bool HasAssociatedStmt, unsigned NumChildren, Params &&... P) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses, HasAssociatedStmt, NumChildren); auto *Inst = new (Mem) T(std::forward<Params>(P)...); Inst->Data = Data; return Inst; } template <typename T> static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses, bool HasAssociatedStmt = false, unsigned NumChildren = 0) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses, HasAssociatedStmt, NumChildren); auto *Inst = new (Mem) T; Inst->Data = Data; return Inst; } public: /// Iterates over expressions/statements used in the construct. 
  /// Forward iterator over the statements/expressions used by the clauses of
  /// a directive. Adapts an iterator over the clause list and, for each
  /// clause, walks its used_children() range, skipping clauses whose range is
  /// empty.
  class used_clauses_child_iterator
      : public llvm::iterator_adaptor_base<
            used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
            std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
    // One past the last clause; this->I (from the adaptor base) is the
    // current clause.
    ArrayRef<OMPClause *>::iterator End;
    // Position within the current clause's used_children() range.
    OMPClause::child_iterator ChildI, ChildEnd;

    // Advance over clauses with no used children until a non-empty
    // used_children() range is found or the clause list is exhausted.
    // No-op if the current child range is not yet exhausted.
    void MoveToNext() {
      if (ChildI != ChildEnd)
        return;
      while (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
          if (ChildI != ChildEnd)
            return;
        }
      }
    }

  public:
    /// Construct an iterator positioned at the first used child of the first
    /// clause that has one (or at the end if none do).
    explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
        : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      if (this->I != End) {
        ChildI = (*this->I)->used_children().begin();
        ChildEnd = (*this->I)->used_children().end();
        MoveToNext();
      }
    }
    Stmt *operator*() const { return *ChildI; }
    Stmt *operator->() const { return **this; }

    used_clauses_child_iterator &operator++() {
      ++ChildI;
      if (ChildI != ChildEnd)
        return *this;
      // Current clause exhausted: step to the next clause's child range,
      // then skip any empty ranges.
      if (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
        }
      }
      MoveToNext();
      return *this;
    }
  };

  /// Range over all statements/expressions used by \p Clauses.
  static llvm::iterator_range<used_clauses_child_iterator>
  used_clauses_children(ArrayRef<OMPClause *> Clauses) {
    return {used_clauses_child_iterator(Clauses),
            // End iterator: an empty array starting at Clauses.end().
            used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  /// Forward iterator over only the clauses of type \p SpecificClause in a
  /// directive's clause list; all other clause kinds are skipped.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    // One past the last clause; this->I (from the adaptor base) is the
    // current position.
    ArrayRef<OMPClause *>::const_iterator End;

    // Advance this->I past clauses that are not of type SpecificClause.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    /// Construct positioned at the first clause of the requested kind (or at
    /// the end if there is none).
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  /// Range over the clauses of kind \p SpecificClause within \p Clauses.
  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            // End iterator: an empty array starting at Clauses.end().
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Range over this directive's clauses of kind \p SpecificClause.
  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  /// Returns the single clause of kind \p SpecificClause in \p Clauses, or
  /// nullptr if there is none. Asserts that at most one such clause exists.
  template <typename SpecificClause>
  static const SpecificClause *getSingleClause(ArrayRef<OMPClause *> Clauses) {
    auto ClausesOfKind = getClausesOfKind<SpecificClause>(Clauses);

    if (ClausesOfKind.begin() != ClausesOfKind.end()) {
      assert(std::next(ClausesOfKind.begin()) == ClausesOfKind.end() &&
             "There are at least 2 clauses of the specified kind");
      return *ClausesOfKind.begin();
    }
    return nullptr;
  }

  /// Same as the static overload, applied to this directive's own clauses.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    return getSingleClause<SpecificClause>(clauses());
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Get number of clauses. Returns 0 when no child data is attached.
  unsigned getNumClauses() const {
    if (!Data)
      return 0;
    return Data->getNumClauses();
  }

  /// Returns specified clause.
  ///
  /// \param I Number of clause.
  ///
  OMPClause *getClause(unsigned I) const { return clauses()[I]; }

  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }

  /// Returns statement associated with the directive.
  /// Const access to the associated statement; delegates to the non-const
  /// overload.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
  }
  /// Returns statement associated with the directive; the directive must
  /// have one (asserted).
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getAssociatedStmt();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getCapturedStmt(RegionKind, CaptureRegions);
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getInnermostCapturedStmt(CaptureRegions);
  }

  /// Const access to the innermost captured statement; delegates to the
  /// non-const overload.
  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  /// Returns the kind of OpenMP directive this node represents.
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  /// Children of the directive: the associated statement range, or an empty
  /// range when no child data is attached.
  child_range children() {
    if (!Data)
      return child_range(child_iterator(), child_iterator());
    return Data->getAssociatedStmtAsRange();
  }

  const_child_range children() const {
    return const_cast<OMPExecutableDirective *>(this)->children();
  }

  /// All clauses of the directive (empty when no child data is attached).
  ArrayRef<OMPClause *> clauses() const {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
bool isStandaloneDirective() const; /// Returns the AST node representing OpenMP structured-block of this /// OpenMP executable directive, /// Prerequisite: Executable Directive must not be Standalone directive. const Stmt *getStructuredBlock() const { return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock(); } Stmt *getStructuredBlock(); const Stmt *getRawStmt() const { return const_cast<OMPExecutableDirective *>(this)->getRawStmt(); } Stmt *getRawStmt() { assert(hasAssociatedStmt() && "Expected directive with the associated statement."); return Data->getRawStmt(); } }; /// This represents '#pragma omp parallel' directive. /// /// \code /// #pragma omp parallel private(a,b) reduction(+: c,d) /// \endcode /// In this example directive '#pragma omp parallel' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending Location of the directive. /// OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPParallelDirectiveClass, llvm::omp::OMPD_parallel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPParallelDirective() : OMPExecutableDirective(OMPParallelDirectiveClass, llvm::omp::OMPD_parallel, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// The base class for all loop-based directives, including loop transformation /// directives. class OMPLoopBasedDirective : public OMPExecutableDirective { friend class ASTStmtReader; protected: /// Number of collapsed loops as specified by 'collapse' clause. unsigned NumAssociatedLoops = 0; /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param NumAssociatedLoops Number of loops associated with the construct. 
/// OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumAssociatedLoops) : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc), NumAssociatedLoops(NumAssociatedLoops) {} public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for) struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in a same construct, EUB depends on DistUB Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in a same construct Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. 
Expr *CalcLastIteration; /// Loop pre-condition. Expr *PreCond; /// Loop condition. Expr *Cond; /// Loop iteration variable init. Expr *Init; /// Loop increment. Expr *Inc; /// IsLastIteration - local flag variable passed to runtime. Expr *IL; /// LowerBound - local variable passed to runtime. Expr *LB; /// UpperBound - local variable passed to runtime. Expr *UB; /// Stride - local variable passed to runtime. Expr *ST; /// EnsureUpperBound -- expression UB = min(UB, NumIterations). Expr *EUB; /// Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// PreviousLowerBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevLB; /// PreviousUpperBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevUB; /// DistInc - increment expression for distribute loop when found /// combined with a further loop level (e.g. in 'distribute parallel for') /// expression IV = IV + ST Expr *DistInc; /// PrevEUB - expression similar to EUB but to be used when loop /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for' /// when ensuring that the UB is either the calculated UB by the runtime or /// the end of the assigned distribute chunk) /// expression UB = min (UB, PrevUB) Expr *PrevEUB; /// Counters Loop counters. SmallVector<Expr *, 4> Counters; /// PrivateCounters Loop counters. SmallVector<Expr *, 4> PrivateCounters; /// Expressions for loop counters inits for CodeGen. SmallVector<Expr *, 4> Inits; /// Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// Final loop counter values for GodeGen. SmallVector<Expr *, 4> Finals; /// List of counters required for the generation of the non-rectangular /// loops. 
SmallVector<Expr *, 4> DependentCounters; /// List of initializers required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentInits; /// List of final conditions required for the generation of the /// non-rectangular loops. SmallVector<Expr *, 4> FinalsConditions; /// Init statement for all captured expressions. Stmt *PreInits; /// Expressions used when combining OpenMP loop pragmas DistCombinedHelperExprs DistCombinedFields; /// Check if all the expressions are built (does not check the /// worksharing ones). bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && NumIterations != nullptr && PreCond != nullptr && Cond != nullptr && Init != nullptr && Inc != nullptr; } /// Initialize all the fields to null. /// \param Size Number of elements in the /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions /// arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; NumIterations = nullptr; PrevLB = nullptr; PrevUB = nullptr; DistInc = nullptr; PrevEUB = nullptr; Counters.resize(Size); PrivateCounters.resize(Size); Inits.resize(Size); Updates.resize(Size); Finals.resize(Size); DependentCounters.resize(Size); DependentInits.resize(Size); FinalsConditions.resize(Size); for (unsigned I = 0; I < Size; ++I) { Counters[I] = nullptr; PrivateCounters[I] = nullptr; Inits[I] = nullptr; Updates[I] = nullptr; Finals[I] = nullptr; DependentCounters[I] = nullptr; DependentInits[I] = nullptr; FinalsConditions[I] = nullptr; } PreInits = nullptr; DistCombinedFields.LB = nullptr; DistCombinedFields.UB = nullptr; DistCombinedFields.EUB = nullptr; DistCombinedFields.Init = nullptr; DistCombinedFields.Cond = nullptr; DistCombinedFields.NLB = nullptr; DistCombinedFields.NUB = 
nullptr; DistCombinedFields.DistCond = nullptr; DistCombinedFields.ParForInDistCond = nullptr; } }; /// Get number of collapsed loops. unsigned getLoopsNumber() const { return NumAssociatedLoops; } /// Try to find the next loop sub-statement in the specified statement \p /// CurStmt. /// \param TryImperfectlyNestedLoops true, if we need to try to look for the /// imperfectly nested loop. static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt, bool TryImperfectlyNestedLoops); static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt, bool TryImperfectlyNestedLoops) { return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops); } /// Calls the specified callback function for all the loops in \p CurStmt, /// from the outermost to the innermost. static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref<bool(unsigned, Stmt *)> Callback, llvm::function_ref<void(OMPLoopTransformationDirective *)> OnTransformationCallback); static bool doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref<bool(unsigned, const Stmt *)> Callback, llvm::function_ref<void(const OMPLoopTransformationDirective *)> OnTransformationCallback) { auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) { return Callback(Cnt, CurStmt); }; auto &&NewTransformCb = [OnTransformationCallback](OMPLoopTransformationDirective *A) { OnTransformationCallback(A); }; return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops, NumLoops, NewCallback, NewTransformCb); } /// Calls the specified callback function for all the loops in \p CurStmt, /// from the outermost to the innermost. 
static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref<bool(unsigned, Stmt *)> Callback) { auto &&TransformCb = [](OMPLoopTransformationDirective *) {}; return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback, TransformCb); } static bool doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref<bool(unsigned, const Stmt *)> Callback) { auto &&NewCallback = [Callback](unsigned Cnt, const Stmt *CurStmt) { return Callback(Cnt, CurStmt); }; return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops, NumLoops, NewCallback); } /// Calls the specified callback function for all the loop bodies in \p /// CurStmt, from the outermost loop to the innermost. static void doForAllLoopsBodies( Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback); static void doForAllLoopsBodies( const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) { auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) { Callback(Cnt, Loop, Body); }; doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops, NumLoops, NewCallback); } static bool classof(const Stmt *T) { if (auto *D = dyn_cast<OMPExecutableDirective>(T)) return isOpenMPLoopDirective(D->getDirectiveKind()); return false; } }; /// The base class for all loop transformation directives. class OMPLoopTransformationDirective : public OMPLoopBasedDirective { friend class ASTStmtReader; /// Number of loops generated by this loop transformation. 
unsigned NumGeneratedLoops = 0; protected: explicit OMPLoopTransformationDirective(StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumAssociatedLoops) : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops) {} /// Set the number of loops generated by this loop transformation. void setNumGeneratedLoops(unsigned Num) { NumGeneratedLoops = Num; } public: /// Return the number of associated (consumed) loops. unsigned getNumAssociatedLoops() const { return getLoopsNumber(); } /// Return the number of loops generated by this loop transformation. unsigned getNumGeneratedLoops() { return NumGeneratedLoops; } /// Get the de-sugared statements after after the loop transformation. /// /// Might be nullptr if either the directive generates no loops and is handled /// directly in CodeGen, or resolving a template-dependence context is /// required. Stmt *getTransformedStmt() const; /// Return preinits statement. Stmt *getPreInits() const; static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTileDirectiveClass || T->getStmtClass() == OMPUnrollDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPLoopBasedDirective { friend class ASTStmtReader; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 11 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, three arrays of length NumAssociatedLoops are /// allocated: loop counters, their updates and final values. 
/// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { IterationVariableOffset = 0, LastIterationOffset = 1, CalcLastIterationOffset = 2, PreConditionOffset = 3, CondOffset = 4, InitOffset = 5, IncOffset = 6, PreInitsOffset = 7, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 8, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 8, LowerBoundVariableOffset = 9, UpperBoundVariableOffset = 10, StrideVariableOffset = 11, EnsureUpperBoundOffset = 12, NextLowerBoundOffset = 13, NextUpperBoundOffset = 14, NumIterationsOffset = 15, // Offset to the end for worksharing loop directives. WorksharingEnd = 16, PrevLowerBoundVariableOffset = 16, PrevUpperBoundVariableOffset = 17, DistIncOffset = 18, PrevEnsureUpperBoundOffset = 19, CombinedLowerBoundVariableOffset = 20, CombinedUpperBoundVariableOffset = 21, CombinedEnsureUpperBoundOffset = 22, CombinedInitOffset = 23, CombinedConditionOffset = 24, CombinedNextLowerBoundOffset = 25, CombinedNextUpperBoundOffset = 26, CombinedDistConditionOffset = 27, CombinedParForInDistConditionOffset = 28, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. CombinedDistributeEnd = 29, }; /// Get the counters storage. 
MutableArrayRef<Expr *> getCounters() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind())]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + getLoopsNumber()]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } /// Get the updates storage. MutableArrayRef<Expr *> getInits() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 2 * getLoopsNumber()]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 3 * getLoopsNumber()]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 4 * getLoopsNumber()]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } /// Get the dependent counters storage. MutableArrayRef<Expr *> getDependentCounters() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 5 * getLoopsNumber()]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } /// Get the dependent inits storage. MutableArrayRef<Expr *> getDependentInits() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 6 * getLoopsNumber()]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } /// Get the finals conditions storage. 
MutableArrayRef<Expr *> getFinalsConditions() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 7 * getLoopsNumber()]); return llvm::makeMutableArrayRef(Storage, getLoopsNumber()); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPGenericLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 8 * CollapsedNum; // Counters, PrivateCounters, Inits, // Updates, Finals, DependentCounters, // DependentInits, FinalsConditions. 
} void setIterationVariable(Expr *IV) { Data->getChildren()[IterationVariableOffset] = IV; } void setLastIteration(Expr *LI) { Data->getChildren()[LastIterationOffset] = LI; } void setCalcLastIteration(Expr *CLI) { Data->getChildren()[CalcLastIterationOffset] = CLI; } void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; } void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; } void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; } void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; } void setPreInits(Stmt *PreInits) { Data->getChildren()[PreInitsOffset] = PreInits; } void setIsLastIterVariable(Expr *IL) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[IsLastIterVariableOffset] = IL; } void setLowerBoundVariable(Expr *LB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[LowerBoundVariableOffset] = LB; } void setUpperBoundVariable(Expr *UB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[UpperBoundVariableOffset] = UB; } void setStrideVariable(Expr *ST) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); 
Data->getChildren()[StrideVariableOffset] = ST; } void setEnsureUpperBound(Expr *EUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[EnsureUpperBoundOffset] = EUB; } void setNextLowerBound(Expr *NLB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[NextLowerBoundOffset] = NLB; } void setNextUpperBound(Expr *NUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[NextUpperBoundOffset] = NUB; } void setNumIterations(Expr *NI) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[NumIterationsOffset] = NI; } void setPrevLowerBoundVariable(Expr *PrevLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB; } void setPrevUpperBoundVariable(Expr *PrevUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB; } void setDistInc(Expr *DistInc) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); 
Data->getChildren()[DistIncOffset] = DistInc; } void setPrevEnsureUpperBound(Expr *PrevEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB; } void setCombinedLowerBoundVariable(Expr *CombLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB; } void setCombinedUpperBoundVariable(Expr *CombUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB; } void setCombinedEnsureUpperBound(Expr *CombEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB; } void setCombinedInit(Expr *CombInit) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedInitOffset] = CombInit; } void setCombinedCond(Expr *CombCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedConditionOffset] = CombCond; } void setCombinedNextLowerBound(Expr *CombNLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB; } void setCombinedNextUpperBound(Expr *CombNUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB; } void setCombinedDistCond(Expr *CombDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); Data->getChildren()[CombinedDistConditionOffset] = CombDistCond; } 
void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); Data->getChildren()[CombinedParForInDistConditionOffset] = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); void setFinals(ArrayRef<Expr *> A); void setDependentCounters(ArrayRef<Expr *> A); void setDependentInits(ArrayRef<Expr *> A); void setFinalsConditions(ArrayRef<Expr *> A); public: Expr *getIterationVariable() const { return cast<Expr>(Data->getChildren()[IterationVariableOffset]); } Expr *getLastIteration() const { return cast<Expr>(Data->getChildren()[LastIterationOffset]); } Expr *getCalcLastIteration() const { return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]); } Expr *getPreCond() const { return cast<Expr>(Data->getChildren()[PreConditionOffset]); } Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); } Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); } Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); } const Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected 
worksharing loop directive"); return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[StrideVariableOffset]); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || 
isOpenMPGenericLoopDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NumIterationsOffset]); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[DistIncOffset]); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedInitOffset]); } Expr 
*getCombinedCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedConditionOffset]); } Expr *getCombinedNextLowerBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]); } Expr *getCombinedNextUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]); } Expr *getCombinedDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]); } Expr *getCombinedParForInDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]); } Stmt *getBody(); const Stmt *getBody() const { return const_cast<OMPLoopDirective *>(this)->getBody(); } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } ArrayRef<Expr *> 
dependent_counters() { return getDependentCounters(); } ArrayRef<Expr *> dependent_counters() const { return const_cast<OMPLoopDirective *>(this)->getDependentCounters(); } ArrayRef<Expr *> dependent_inits() { return getDependentInits(); } ArrayRef<Expr *> dependent_inits() const { return const_cast<OMPLoopDirective *>(this)->getDependentInits(); } ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); } ArrayRef<Expr *> finals_conditions() const { return const_cast<OMPLoopDirective *>(this)->getFinalsConditions(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPGenericLoopDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() 
== OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getLoopsNumber(), llvm::omp::OMPD_for)] = E; } /// Set cancel state. 
void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. 
/// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSectionsDirective() : OMPExecutableDirective(OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner directive. 
/// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSectionDirectiveClass, llvm::omp::OMPD_section, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(OMPSectionDirectiveClass, llvm::omp::OMPD_section, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. /// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSingleDirective() : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. 
/// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc), DirName(Name) {} /// Build an empty directive. /// explicit OMPCriticalDirective() : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation()) {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. 
/// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getLoopsNumber(), llvm::omp::OMPD_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. 
/// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. 
/// /// \code /// #pragma omp parallel master private(a,b) /// \endcode /// In this example directive '#pragma omp parallel master' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPParallelMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, StartLoc, EndLoc) {} explicit OMPParallelMasterDirective() : OMPExecutableDirective(OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// static OMPParallelMasterDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelMasterDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. 
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelMasterDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  // LLVM-style RTTI hook used by isa<>/dyn_cast<>.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterDirectiveClass;
  }
};

/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelSectionsDirective()
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelSectionsDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if this directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task, StartLoc, EndLoc) {} /// Build an empty directive. 
/// explicit OMPTaskDirective() : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task, SourceLocation(), SourceLocation()) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {} /// Build an empty directive. 
/// explicit OMPTaskyieldDirective() : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskgroupDirective() : OMPExecutableDirective(OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, SourceLocation(), SourceLocation()) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef(); } Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. 
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPFlushDirective()
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  // LLVM-style RTTI hook used by isa<>/dyn_cast<>.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};

/// This represents '#pragma omp depobj' directive.
///
/// \code
/// #pragma omp depobj(a) depend(in:x,y)
/// \endcode
/// In this example directive '#pragma omp depobj' initializes a depobj object
/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
class OMPDepobjDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
/// OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPDepobjDirective() : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPDepobjDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPDepobjDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDepobjDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPOrderedDirective() : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// \param IsStandalone true, if the the standalone directive is created. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, bool IsStandalone, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart = false; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first(postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPAtomicDirective() : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation()) {} enum DataPositionTy : size_t { POS_X = 0, POS_V, POS_E, POS_UpdateExpr, }; /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { Data->getChildren()[DataPositionTy::POS_X] = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { Data->getChildren()[DataPositionTy::POS_UpdateExpr] = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. 
/// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]); } const Expr *getX() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>( Data->getChildren()[DataPositionTy::POS_UpdateExpr]); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>( Data->getChildren()[DataPositionTy::POS_UpdateExpr]); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]); } const Expr *getV() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]); } /// Get 'expr' part of the associated expression/statement. 
Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]); } const Expr *getExpr() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDirective() : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. 
/// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDataDirective() : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. 
/// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetEnterDataDirective() : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. 
/// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetExitDataDirective() : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. 
/// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetParallelDirective() : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E; } /// Set cancel state. 
void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. 
/// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTeamsDirective() : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// statements and child expressions. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, StartLoc, EndLoc) {} /// Build an empty directive. explicit OMPCancellationPointDirective() : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPCancelDirective() : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. 
/// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp master taskloop simd' directive. /// /// \code /// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop simd' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                                     unsigned NumClauses,
                                                     unsigned CollapsedNum,
                                                     EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetUpdateDirective()
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetTeamsDirective()
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_teams_distribute_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective( OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum) : OMPLoopDirective( OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents the '#pragma omp tile' loop transformation directive. class OMPTileDirective final : public OMPLoopTransformationDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Default list of offsets. 
enum { PreInitsOffset = 0, TransformedStmtOffset, }; explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumLoops) : OMPLoopTransformationDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile, StartLoc, EndLoc, NumLoops) { setNumGeneratedLoops(3 * NumLoops); } void setPreInits(Stmt *PreInits) { Data->getChildren()[PreInitsOffset] = PreInits; } void setTransformedStmt(Stmt *S) { Data->getChildren()[TransformedStmtOffset] = S; } public: /// Create a new AST node representation for '#pragma omp tile'. /// /// \param C Context of the AST. /// \param StartLoc Location of the introducer (e.g. the 'omp' token). /// \param EndLoc Location of the directive's end (e.g. the tok::eod). /// \param Clauses The directive's clauses. /// \param NumLoops Number of associated loops (number of items in the /// 'sizes' clause). /// \param AssociatedStmt The outermost associated loop. /// \param TransformedStmt The loop nest after tiling, or nullptr in /// dependent contexts. /// \param PreInits Helper preinits statements for the loop nest. static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, unsigned NumLoops, Stmt *AssociatedStmt, Stmt *TransformedStmt, Stmt *PreInits); /// Build an empty '#pragma omp tile' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumClauses Number of clauses to allocate. /// \param NumLoops Number of associated loops to allocate. static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned NumLoops); /// Gets/sets the associated loops after tiling. /// /// This is in de-sugared format stored as a CompoundStmt. /// /// \code /// for (...) /// ... /// \endcode /// /// Note that if the generated loops a become associated loops of another /// directive, they may need to be hoisted before them. 
Stmt *getTransformedStmt() const { return Data->getChildren()[TransformedStmtOffset]; } /// Return preinits statement. Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTileDirectiveClass; } }; /// This represents the '#pragma omp unroll' loop transformation directive. /// /// \code /// #pragma omp unroll /// for (int i = 0; i < 64; ++i) /// \endcode class OMPUnrollDirective final : public OMPLoopTransformationDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Default list of offsets. enum { PreInitsOffset = 0, TransformedStmtOffset, }; explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPLoopTransformationDirective(OMPUnrollDirectiveClass, llvm::omp::OMPD_unroll, StartLoc, EndLoc, 1) {} /// Set the pre-init statements. void setPreInits(Stmt *PreInits) { Data->getChildren()[PreInitsOffset] = PreInits; } /// Set the de-sugared statement. void setTransformedStmt(Stmt *S) { Data->getChildren()[TransformedStmtOffset] = S; } public: /// Create a new AST node representation for '#pragma omp unroll'. /// /// \param C Context of the AST. /// \param StartLoc Location of the introducer (e.g. the 'omp' token). /// \param EndLoc Location of the directive's end (e.g. the tok::eod). /// \param Clauses The directive's clauses. /// \param AssociatedStmt The outermost associated loop. /// \param TransformedStmt The loop nest after tiling, or nullptr in /// dependent contexts. /// \param PreInits Helper preinits statements for the loop nest. static OMPUnrollDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, unsigned NumGeneratedLoops, Stmt *TransformedStmt, Stmt *PreInits); /// Build an empty '#pragma omp unroll' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumClauses Number of clauses to allocate. 
static OMPUnrollDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses); /// Get the de-sugared associated loops after unrolling. /// /// This is only used if the unrolled loop becomes an associated loop of /// another directive, otherwise the loop is emitted directly using loop /// transformation metadata. When the unrolled loop cannot be used by another /// directive (e.g. because of the full clause), the transformed stmt can also /// be nullptr. Stmt *getTransformedStmt() const { return Data->getChildren()[TransformedStmtOffset]; } /// Return the pre-init statements. Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPUnrollDirectiveClass; } }; /// This represents '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' with /// list item 'a'. class OMPScanDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPScanDirective() : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). 
/// static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPScanDirectiveClass; } }; /// This represents '#pragma omp interop' directive. /// /// \code /// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait /// \endcode /// In this example directive '#pragma omp interop' has /// clauses 'init', 'device', 'depend' and 'nowait'. /// class OMPInteropDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive. /// \param EndLoc Ending location of the directive. /// OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPInteropDirectiveClass, llvm::omp::OMPD_interop, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPInteropDirective() : OMPExecutableDirective(OMPInteropDirectiveClass, llvm::omp::OMPD_interop, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive. /// \param EndLoc Ending Location of the directive. /// \param Clauses The directive's clauses. /// static OMPInteropDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPInteropDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPInteropDirectiveClass; } }; /// This represents '#pragma omp dispatch' directive. /// /// \code /// #pragma omp dispatch device(dnum) /// \endcode /// This example shows a directive '#pragma omp dispatch' with a /// device clause with variable 'dnum'. /// class OMPDispatchDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// The location of the target-call. SourceLocation TargetCallLoc; /// Set the location of the target-call. void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; } /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPDispatchDirectiveClass, llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPDispatchDirective() : OMPExecutableDirective(OMPDispatchDirectiveClass, llvm::omp::OMPD_dispatch, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TargetCallLoc Location of the target-call. /// static OMPDispatchDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, SourceLocation TargetCallLoc); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. 
/// static OMPDispatchDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return location of target-call. SourceLocation getTargetCallLoc() const { return TargetCallLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDispatchDirectiveClass; } }; /// This represents '#pragma omp masked' directive. /// \code /// #pragma omp masked filter(tid) /// \endcode /// This example shows a directive '#pragma omp masked' with a filter clause /// with variable 'tid'. /// class OMPMaskedDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMaskedDirective() : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMaskedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMaskedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMaskedDirectiveClass; } }; /// This represents '#pragma omp metadirective' directive. 
/// /// \code /// #pragma omp metadirective when(user={condition(N>10)}: parallel for) /// \endcode /// In this example directive '#pragma omp metadirective' has clauses 'when' /// with a dynamic user condition to check if a variable 'N > 10' /// class OMPMetaDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; Stmt *IfStmt; OMPMetaDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMetaDirectiveClass, llvm::omp::OMPD_metadirective, StartLoc, EndLoc) {} explicit OMPMetaDirective() : OMPExecutableDirective(OMPMetaDirectiveClass, llvm::omp::OMPD_metadirective, SourceLocation(), SourceLocation()) {} void setIfStmt(Stmt *S) { IfStmt = S; } public: static OMPMetaDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Stmt *IfStmt); static OMPMetaDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); Stmt *getIfStmt() const { return IfStmt; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMetaDirectiveClass; } }; /// This represents '#pragma omp loop' directive. /// /// \code /// #pragma omp loop private(a,b) binding(parallel) order(concurrent) /// \endcode /// In this example directive '#pragma omp loop' has /// clauses 'private' with the variables 'a' and 'b', 'binding' with /// modifier 'parallel' and 'order(concurrent). /// class OMPGenericLoopDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPGenericLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPGenericLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with a place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// \param CollapsedNum Number of collapsed nested loops. /// static OMPGenericLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPGenericLoopDirectiveClass; } }; } // end namespace clang #endif
GB_unop__cosh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__cosh_fc32_fc32)
// op(A') function:  GB (_unop_tran__cosh_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = ccoshf (aij)

// NOTE: the GB_* macros below are consumed by the shared template files
// (e.g. GB_unop_transpose.c); their exact token spelling is part of the
// template contract and must not be changed by hand.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex hyperbolic cosine (C99 ccoshf)
#define GB_OP(z, x) \
    z = ccoshf (x) ;

// casting (identity cast: A and C have the same type here)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = ccoshf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COSH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies ccoshf entrywise to the anz entries of Ax, writing into Cx.
// Cx and Ax may alias (the update is purely elementwise, one read then
// one write per position).  Returns GrB_NO_VALUE when this operator/type
// combination is compiled out via GB_DISABLE.

GrB_Info GB (_unop_apply__cosh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ccoshf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap bit says "no entry here"
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ccoshf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire body is supplied by the shared template GB_unop_transpose.c,
// which expands in terms of the GB_* macros defined above.

GrB_Info GB (_unop_tran__cosh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__pair_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__pair_uint8
// A.*B function (eWiseMult):       GB_AemultB__pair_uint8
// A*D function (colscale):         GB_AxD__pair_uint8
// D*A function (rowscale):         GB_DxB__pair_uint8
// C+=B function (dense accum):     GB_Cdense_accumB__pair_uint8
// C+=b function (dense accum):     GB_Cdense_accumb__pair_uint8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__pair_uint8
// C=scalar+B                       (none)
// C=scalar+B'                      (none)
// C=A+scalar                       (none)
// C=A'+scalar                      (none)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = 1

// NOTE: this is the PAIR operator, z = f(x,y) = 1 for every pair of
// entries.  The operand values are never read, which is why the GB_GETA
// and GB_GETB macros below expand to nothing.  The GB_* macros are the
// contract consumed by the shared template files; do not edit by hand.

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] (a no-op here: PAIR ignores the value of A)
#define GB_GETA(aij,Ax,pA) \
    ;

// bij = Bx [pB] (a no-op here: PAIR ignores the value of B)
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: PAIR always produces 1, regardless of x and y
#define GB_BINOP(z, x, y) \
    z = 1 ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" is the generator's placeholder: no CBLAS gateway for PAIR/uint8,
// and this macro is never expanded in that case.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_UINT8 || GxB_NO_PAIR_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// This variant does not exist for PAIR, so the generator emits it inside
// #if 0 with the placeholder name "(none)".

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__pair_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// kfirst_slice/klast_slice/pstart_slice describe the ek-slice partition of
// B's entries across ntasks tasks (see GB_ek_slice.h).

GrB_Info GB_Cdense_accumB__pair_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__pair_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(generated code): this second return is unreachable — the inner
    // block above always returns first.  Harmless; left as the generator
    // emits it.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__pair_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__pair_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// TaskList/ntasks describe the parallel schedule built by GB_add's
// symbolic phase; C_to_M/C_to_A/C_to_B map C's vectors to those of
// M, A, and B.

GrB_Info GB_AaddB__pair_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__pair_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// bind1st does not exist for PAIR (the result is 1 no matter what is
// bound), so the generator emits the stub inside #if 0.  The bare ";"
// statements are the expanded no-op GB_GETA/GB_GETB macros.

#if 0
GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
convolutiondepthwise_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static inline signed char float2int8(float v) { int int32 = static_cast<int>(round(v)); if (int32 > 127) return 127; if (int32 < -127) return -127; return (signed char)int32; } static void convdw3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0++; r1++; r2++; 
outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } static void convdw3x3s1_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const int32_t *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); float *outptr = out; const float scale_dequant = scales_dequant[p]; //const float bias0 = bias ? bias[p] * scale_dequant : 0.f; const int32_t bias0 = bias ? 
bias[p] : 0; //out.fill(bias0); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; //*outptr += (float)sum * scale_dequant; *outptr += (sum + bias0) * scale_dequant; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; const int32_t *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); float *outptr = out; const float scale_dequant = scales_dequant[p]; //const float bias0 = bias ? bias[p] * scale_dequant : 0.f; const int32_t bias0 = bias ? 
bias[p] : 0; //out.fill(bias0); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; //*outptr += (float)sum * scale_dequant; *outptr += (sum + bias0) * scale_dequant; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } static void convdw3x3s1_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); signed char *outptr = out; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out); r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; const float *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); signed char *outptr = out; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out); r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
updater_basemaker-inl.h
/*!
 * Copyright 2014 by Contributors
 * \file updater_basemaker-inl.h
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/sync.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operations
 *  needed in tree making.
 *
 * Row positions are stored in position_ with a bitwise-NOT encoding:
 * a non-negative entry means the row is actively assigned to that node;
 * a negative entry ~nid means the row belongs to nid but is no longer
 * expanding (deleted, subsampled out, or a finished leaf).
 */
class BaseMaker: public TreeUpdater {
 public:
  // Parse and store the training parameters.
  void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
    param_.InitAllowUnknown(args);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    inline void InitByCol(DMatrix* p_fmat,
                          const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      auto iter = p_fmat->ColIterator();
      iter->BeforeFirst();
      while (iter->Next()) {
        auto &batch = iter->Value();
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            // columns are sorted by value, so c[0] is the min and
            // c[c.size()-1] the max; the min is stored NEGATED in slot
            // 2*fid so a single max-Allreduce syncs both bounds
            fminmax_[fid * 2 + 0] =
                std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] =
                std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information across workers */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceed query bound ";
      bst_float a = fminmax_[fid * 2];      // negated minimum
      bst_float b = fminmax_[fid * 2 + 1];  // maximum
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      if (-a == b) {
        // min == max: only a single distinct value was seen
        return 1;
      } else {
        return 2;
      }
    }
    // maximum observed value of the feature
    inline bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid *2 + 1];
    }
    // sample a fraction p of the non-empty features into *p_findex;
    // rank 0 draws the sample and broadcasts it so all workers agree
    inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
      std::vector<bst_uint> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    // per-feature bounds: slot 2*fid holds -min, slot 2*fid+1 holds max
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief this is helper function for row based data*/
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    // linear scan of the sparse row for the split feature
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    // feature is missing in this row: follow the default direction
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*! \brief initialize temp data structure */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
        << "TreeMaker: can only grow new tree";
    const std::vector<unsigned> &root_index = fmat.Info().root_index_;
    {
      // setup position
      position_.resize(gpair.size());
      if (root_index.size() == 0) {
        std::fill(position_.begin(), position_.end(), 0);
      } else {
        for (size_t i = 0; i < position_.size(); ++i) {
          position_[i] = root_index[i];
          CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
              << "root index exceed setting";
        }
      }
      // mark delete for the deleted datas (negative hessian flags deletion)
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample: rows losing the coin flip are deactivated via ~
      if (param_.subsample < 1.0f) {
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query: initially all roots are candidates for expansion
      qexpand_.reserve(256);
      qexpand_.clear();
      for (int i = 0; i < tree.param.num_roots; ++i) {
        qexpand_.push_back(i);
      }
      this->UpdateNode2WorkIndex(tree);
    }
  }
  /*! \brief update queue expand add in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position (strips the ~ "inactive" encoding)
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // store nid for ridx, preserving the row's active/inactive flag
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  reset the positions to the latest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes,
                               DMatrix *p_fmat,
                               const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat,
                                const RegTree &tree) {
    // set rest of instances to default position
    const RowSet &rowset = p_fmat->BufferedRowset();
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark them as ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(rowset.Size());

    #pragma omp parallel for schedule(static)
    for (bst_omp_uint i = 0; i < ndata; ++i) {
      const bst_uint ridx = rowset[i];
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  to CORRECT the positions of non-default directions that WAS set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The set of index that contains split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = batch[fid];
      auto it = std::lower_bound(sorted_split_set.begin(),
                                 sorted_split_set.end(), fid);

      // only process columns whose feature is actually used by some split
      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();

          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief this is helper function uses column based data structure,
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    // deduplicate the split feature indices
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief this is helper function uses column based data structure,
   *  update all positions into nondefault branch, if any, ignore the default branch
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat,
                                        const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);
    auto iter = p_fmat->ColIterator();
    while (iter->Next()) {
      auto &batch = iter->Value();
      for (auto fid : fsplits) {
        auto col = batch[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*! \brief helper function to get statistics from a tree */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    const MetaInfo &info = fmat.Info();
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
    // each thread clears its own per-node accumulators
    #pragma omp parallel
    {
      const int tid = omp_get_thread_num();
      thread_temp[tid].resize(tree.param.num_nodes, TStats(param_));
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid].Clear();
      }
    }
    const RowSet &rowset = fmat.BufferedRowset();
    // setup position
    const auto ndata = static_cast<bst_omp_uint>(rowset.Size());
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint i = 0; i < ndata; ++i) {
      const bst_uint ridx = rowset[i];
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      // nid < 0 means the row is inactive (see position_ encoding) — skip it
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair, info, ridx);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s.Clear();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of amount to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief current size of sketch */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      next_goal = -1.0f;  // sentinel: no element pushed yet
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
     * \brief push a new element to sketch
     * \param fvalue feature value, comes in sorted ascending order
     * \param w weight
     * \param max_size maximum number of entries kept in the sketch
     */
    inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
      if (next_goal == -1.0f) {
        // first element: just record it
        next_goal = 0.0f;
        last_fvalue = fvalue;
        wmin = w;
        return;
      }
      if (last_fvalue != fvalue) {
        double rmax = rmin + wmin;
        if (rmax >= next_goal && sketch->temp.size != max_size) {
          if (sketch->temp.size == 0 ||
              last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
            // push to sketch
            sketch->temp.data[sketch->temp.size] =
                common::WXQuantileSketch<bst_float, bst_float>::
                Entry(static_cast<bst_float>(rmin),
                      static_cast<bst_float>(rmax),
                      static_cast<bst_float>(wmin), last_fvalue);
            CHECK_LT(sketch->temp.size, max_size)
                << "invalid maximum size max_size=" << max_size
                << ", stemp.size" << sketch->temp.size;
            ++sketch->temp.size;
          }
          if (sketch->temp.size == max_size) {
            // sketch is full: set an unreachable goal so nothing more is pushed
            next_goal = sum_total * 2.0f + 1e-5f;
          } else {
            next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
          }
        } else {
          if (rmax >= next_goal) {
            // NOTE(review): "naxt_goal" typo is in the original log message
            LOG(TRACKER) << "INFO: rmax=" << rmax
                         << ", sum_total=" << sum_total
                         << ", naxt_goal=" << next_goal
                         << ", size=" << sketch->temp.size;
          }
        }
        rmin = rmax;
        wmin = w;
        last_fvalue = fvalue;
      } else {
        // repeated value: just accumulate its weight
        wmin += w;
      }
    }
    /*! \brief push final unfinished value to the sketch */
    inline void Finalize(unsigned max_size) {
      double rmax = rmin + wmin;
      if (sketch->temp.size == 0 ||
          last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
        CHECK_LE(sketch->temp.size, max_size)
            << "Finalize: invalid maximum size, max_size=" << max_size
            << ", stemp.size=" << sketch->temp.size;
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        ++sketch->temp.size;
      }
      sketch->PushTemp();
    }
  };
  /*! \brief training parameter of tree grower */
  TrainParam param_;
  /*! \brief queue of nodes to be expanded */
  std::vector<int> qexpand_;
  /*!
   * \brief map active node to its working index offset in qexpand,
   *   can be -1, which means the node is not actively expanding
   */
  std::vector<int> node2workindex_;
  /*!
   * \brief position of each instance in the tree
   *   can be negative, which means this position is no longer expanding
   *   see also Decode/EncodePosition
   */
  std::vector<int> position_;

 private:
  // rebuild node2workindex_ from the current qexpand_ queue
  inline void UpdateNode2WorkIndex(const RegTree &tree) {
    // update the node2workindex
    std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
    node2workindex_.resize(tree.param.num_nodes);
    for (size_t i = 0; i < qexpand_.size(); ++i) {
      node2workindex_[qexpand_[i]] = static_cast<int>(i);
    }
  }
};
}  // namespace tree
}  // namespace xgboost
#endif  // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
GB_binop__ge_bool.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__ge_bool
// A.*B function (eWiseMult):       GB_AemultB__ge_bool
// A*D function (colscale):         GB_AxD__ge_bool
// D*A function (rowscale):         GB_DxB__ge_bool
// C+=B function (dense accum):     GB_Cdense_accumB__ge_bool
// C+=b function (dense accum):     GB_Cdense_accumb__ge_bool
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__ge_bool
// C=scalar+B                       GB_bind1st__ge_bool
// C=scalar+B'                      GB_bind1st_tran__ge_bool
// C=A+scalar                       GB_bind2nd__ge_bool
// C=A'+scalar                      GB_bind2nd_tran__ge_bool

// C type:   bool
// A type:   bool
// B,b type: bool
// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    bool bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x >= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) here: there is no CBLAS axpy for the GE_BOOL operator.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_BOOL || GxB_NO_GE_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the numeric work is done by the included template
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__ge_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable -- the block above always returns (generated code).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is written in place by the included template
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // set union of the patterns of A and B, applying GB_BINOP per entry
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // set intersection of the patterns of A and B
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ge_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (x >= Bx [p]), elementwise over all anz entries
        bool bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ge_bool
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (Ax [p] >= y), elementwise over all anz entries
        bool aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    bool aij = Ax [pA] ;        \
    Cx [pC] = (x >= aij) ;      \
}

GrB_Info GB_bind1st_tran__ge_bool
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    bool aij = Ax [pA] ;        \
    Cx [pC] = (aij >= y) ;      \
}

GrB_Info GB_bind2nd_tran__ge_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3_parallel_queue_first_n_push.c
/* Program : 3
   Author  : Anish
   Topic   : Write a C program using OpenMP features to create two parallel
             threads. The first thread should insert the first 'N' natural
             numbers into a queue in sequence, and the second thread should
             remove the numbers from the queue.
*/
#include<stdio.h>
#include<omp.h>
#include<stdlib.h>

/*
 * Two OpenMP threads share a bounded queue Q[0..n-1]:
 *   thread 0 (producer) inserts 1..N in order,
 *   thread 1 (consumer) removes items in FIFO order.
 * Each step of either thread is paced by a keypress (fgetc on stdin) and
 * the queue state is protected by an omp critical section.
 */
int main()
{
    int n;
    printf("\n ENTER THE VALUE OF N \n");
    /* validate input before using n as a VLA size */
    if (scanf("%d", &n) != 1 || n <= 0)
    {
        fprintf(stderr, "\n INVALID INPUT\n");
        return 1;
    }
    int d, Q[n], rear = -1, front = 0, i = 1;
    omp_set_dynamic(0);             /* force exactly 2 threads */
    #pragma omp parallel num_threads(2)
    {
        /* BUG FIX: 'id' was previously declared outside the parallel
         * region and therefore shared; both threads stored their thread
         * number into the same variable, so one thread could read the
         * other's value and both could take the same branch.  Declaring
         * it inside the region makes it private to each thread. */
        int id = omp_get_thread_num();
        if (id == 0)                /* producer: insert 1..N */
        {
            while (1)
            {
                #pragma omp critical
                {
                    if (rear < n - 1)
                    {
                        Q[++rear] = i;
                        printf("\n INSERTED ITEM IS %d", i);
                        i++;
                    }
                    else
                        printf("\n NO SPACE");
                    fgetc(stdin);   /* pace the loop on keyboard input */
                }
            }
        }
        else                        /* consumer: remove items in FIFO order */
            while (1)
            {
                #pragma omp critical
                {
                    if (front <= rear)
                    {
                        d = Q[front];
                        front++;
                        printf("\n DELETED ITEM IS %d", d);
                    }
                    else
                        printf("\n NO ITEMS TO DELETE");
                    fgetc(stdin);
                }
            }
    }
    return 0;
}
GB_unop__sqrt_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__sqrt_fp32_fp32 // op(A') function: GB_unop_tran__sqrt_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = sqrtf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = sqrtf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = sqrtf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SQRT || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__sqrt_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = sqrtf (z) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__sqrt_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
_polyprism.c
/* Generated by Cython 0.20.1 on Thu Jul 3 12:41:06 2014 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #define CYTHON_ABI "0_20_1" #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY #define Py_OptimizeFlag 0 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? 
PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, 
code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_VERSION_HEX < 0x02060000 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x02060000 #define Py_TPFLAGS_HAVE_VERSION_TAG 0 #endif #if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT) #define Py_TPFLAGS_IS_ABSTRACT 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || 
unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \ PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define 
PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? 
\ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif 
#ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. */ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__fatiando__gravmag___polyprism #define __PYX_HAVE_API__fatiando__gravmag___polyprism #include "math.h" #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "omp.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString 
__Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s) #define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s) #define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s) #define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s) #define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE 
*u_end = u; while (*u_end++) ; return u_end - u - 1; } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { const char* default_encoding_c = PyBytes_AS_STRING(default_encoding); char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (ascii_chars_u == NULL) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, 
default_encoding_c, NULL); if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } } Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; default_encoding_c = PyBytes_AS_STRING(default_encoding); __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(sys); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); return -1; } #endif #endif #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... 
*/ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_polyprism.pyx", "__init__.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 
* ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong 
ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* 
"fatiando/gravmag/_polyprism.pyx":16 * * DTYPE = numpy.float * ctypedef numpy.float_t DTYPE_T # <<<<<<<<<<<<<< * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, */ typedef __pyx_t_5numpy_float_t __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if 
CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define 
__Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
/* Replace the reference held in r with v, releasing the old reference
 * AFTER the assignment so r is never observed pointing at freed memory.
 * X-variants tolerate a NULL old value. */
#define __Pyx_XDECREF_SET(r, v) do {                            \
        PyObject *tmp = (PyObject *) r;                         \
        r = v; __Pyx_XDECREF(tmp);                              \
    } while (0)
#define __Pyx_DECREF_SET(r, v) do {                             \
        PyObject *tmp = (PyObject *) r;                         \
        r = v; __Pyx_DECREF(tmp);                               \
    } while (0)
/* NULL out r before decrefing: keeps r valid if the DECREF triggers
 * arbitrary code (destructors) that can see r. */
#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
#if CYTHON_COMPILING_IN_CPYTHON
/* Attribute lookup fast path for CPython: call the type's tp_getattro
 * (or, on Python 2, tp_getattr) slot directly when present, falling
 * back to the generic PyObject_GetAttr only when neither slot is set.
 * attr_name is expected to be an interned string object. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro))
        return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    if (likely(tp->tp_getattr))
        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* Forward declarations for the exception/argument-handling helpers
 * whose definitions appear later in the generated module. */
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback); /*proto*/
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
    const char* function_name);
/*proto*/ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) static CYTHON_INLINE long __Pyx_mod_long(long, long); /* proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ static CYTHON_INLINE PyObject* 
__Pyx_PyInt_From_unsigned_int(unsigned int value); static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static 
CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE PyObject* 
__Pyx_PyInt_From_int(int value); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static 
PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'openmp' */ /* Module declarations from 'fatiando.gravmag._polyprism' */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double, double, double, double, double, double, double, double); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T = { "DTYPE_T", NULL, sizeof(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "fatiando.gravmag._polyprism" int __pyx_module_is_main_fatiando__gravmag___polyprism = 0; /* Implementation of 'fatiando.gravmag._polyprism' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject 
*__pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static 
PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ 
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_B[] = "B"; static char __pyx_k_H[] = "H"; static char __pyx_k_I[] = "I"; static char __pyx_k_L[] = "L"; static char __pyx_k_O[] = "O"; static char __pyx_k_Q[] = "Q"; static char __pyx_k_b[] = "b"; static char __pyx_k_d[] = "d"; static char __pyx_k_f[] = "f"; static char __pyx_k_g[] = "g"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_k[] = "k"; static char __pyx_k_l[] = "l"; static char __pyx_k_q[] = "q"; static char __pyx_k_x[] = "x"; static char __pyx_k_y[] = "y"; static char __pyx_k_X1[] = "X1"; static char __pyx_k_X2[] = "X2"; static char __pyx_k_Y1[] = "Y1"; static char __pyx_k_Y2[] = "Y2"; static char __pyx_k_Z1[] = "Z1"; static char __pyx_k_Z2[] = "Z2"; static char __pyx_k_Zd[] = "Zd"; static char __pyx_k_Zf[] = "Zf"; static char __pyx_k_Zg[] = "Zg"; static char __pyx_k_bx[] = "bx"; static char __pyx_k_by[] = "by"; static char __pyx_k_bz[] = "bz"; static char __pyx_k_fx[] = "fx"; static char __pyx_k_fy[] = "fy"; static char __pyx_k_fz[] = "fz"; static char __pyx_k_gz[] = "gz"; static char __pyx_k_mx[] = "mx"; static char __pyx_k_my[] = "my"; static char __pyx_k_mz[] = "mz"; static char __pyx_k_tf[] = "tf"; static char __pyx_k_v1[] = "v1"; static char __pyx_k_v2[] = "v2"; static char __pyx_k_v3[] = "v3"; static char __pyx_k_v4[] = "v4"; static char __pyx_k_v5[] = "v5"; static char __pyx_k_v6[] = "v6"; static char __pyx_k_xp[] = "xp"; static char __pyx_k_yp[] = "yp"; static char __pyx_k_z1[] = "z1"; static char __pyx_k_z2[] = "z2"; static char __pyx_k_zp[] = "zp"; static char __pyx_k_gxx[] = "gxx"; static char __pyx_k_gxy[] = "gxy"; static char __pyx_k_gxz[] = "gxz"; static char __pyx_k_gyy[] = "gyy"; static char __pyx_k_gyz[] = "gyz"; static char __pyx_k_gzz[] = "gzz"; static char __pyx_k_kp1[] = "kp1"; static char __pyx_k_res[] = "res"; static char __pyx_k_main[] = "__main__"; 
static char __pyx_k_size[] = "size"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_DTYPE[] = "DTYPE"; static char __pyx_k_float[] = "float"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_Z1_sqr[] = "Z1_sqr"; static char __pyx_k_Z2_sqr[] = "Z2_sqr"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_kernel[] = "kernel"; static char __pyx_k_nverts[] = "nverts"; static char __pyx_k_density[] = "density"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_RuntimeError[] = "RuntimeError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer"; static char __pyx_k_fatiando_gravmag__polyprism[] = "fatiando.gravmag._polyprism"; static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static char __pyx_k_home_leo_src_fatiando_fatiando[] = "/home/leo/src/fatiando/fatiando/gravmag/_polyprism.pyx"; static char __pyx_k_This_is_a_Cython_implementation[] = "\nThis is a Cython implementation of the potential fields of a polygonal prism.\nA pure python implementation is in _polyprism_numpy.py\n"; static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static 
PyObject *__pyx_n_s_X1; static PyObject *__pyx_n_s_X2; static PyObject *__pyx_n_s_Y1; static PyObject *__pyx_n_s_Y2; static PyObject *__pyx_n_s_Z1; static PyObject *__pyx_n_s_Z1_sqr; static PyObject *__pyx_n_s_Z2; static PyObject *__pyx_n_s_Z2_sqr; static PyObject *__pyx_n_s_bx; static PyObject *__pyx_n_s_by; static PyObject *__pyx_n_s_bz; static PyObject *__pyx_n_s_density; static PyObject *__pyx_n_s_fatiando_gravmag__polyprism; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_fx; static PyObject *__pyx_n_s_fy; static PyObject *__pyx_n_s_fz; static PyObject *__pyx_n_s_gxx; static PyObject *__pyx_n_s_gxy; static PyObject *__pyx_n_s_gxz; static PyObject *__pyx_n_s_gyy; static PyObject *__pyx_n_s_gyz; static PyObject *__pyx_n_s_gz; static PyObject *__pyx_n_s_gzz; static PyObject *__pyx_kp_s_home_leo_src_fatiando_fatiando; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_kernel; static PyObject *__pyx_n_s_kp1; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mx; static PyObject *__pyx_n_s_my; static PyObject *__pyx_n_s_mz; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_nverts; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_releasebuffer; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_res; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tf; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_v1; static PyObject *__pyx_n_s_v2; static PyObject *__pyx_n_s_v3; static PyObject *__pyx_n_s_v4; static PyObject *__pyx_n_s_v5; static PyObject *__pyx_n_s_v6; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xp; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_yp; static PyObject *__pyx_n_s_z1; static PyObject 
*__pyx_n_s_z2; static PyObject *__pyx_n_s_zp; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_codeobj__8; static PyObject *__pyx_codeobj__10; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; /* "fatiando/gravmag/_polyprism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */
/* NOTE(review): Cython-GENERATED code for the inline `nogil` function kernelz()
 * in fatiando/gravmag/_polyprism.pyx (lines 18-56; the .pyx source is
 * interleaved below as comments). Do not edit by hand — regenerate from the
 * .pyx. It accumulates the atan2 and log terms of one polygon-edge kernel
 * contribution into `kernel` and returns it. `dummy` = 1e-10 pads denominators
 * and log arguments to avoid singularities (per the .pyx comment). Each
 * division guard acquires the GIL only to set ZeroDivisionError, then jumps to
 * __pyx_L1_error, where the error is swallowed via __Pyx_WriteUnraisable and
 * 0 is returned. Presumably (X1,Y1)/(X2,Y2) are consecutive polygon vertices
 * relative to the observation point and Z1/Z2 the prism top/bottom — TODO
 * confirm against the callers in the .pyx. */
static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, double __pyx_v_Z1_sqr, double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k1;
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":24 * Qk1, Qk2, Ak1, Ak2, R1k1, R1k2, R2k1, R2k2, Bk1, Bk2, E1k1, \ * E1k2, E2k1, E2k2, Ck1, Ck2 * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * p = X1*Y2 - X2*Y1 */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":25 * E1k2, E2k1, E2k2, Ck1, Ck2 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * p = X1*Y2 - X2*Y1 * p_sqr = p**2 */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":26 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * p = X1*Y2 - X2*Y1 # <<<<<<<<<<<<<< * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 */ __pyx_v_p = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":27 * kernel = 0 * p = X1*Y2 - X2*Y1 * p_sqr = p**2 # <<<<<<<<<<<<<< * Qk1 = (Y2 -
Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 */ __pyx_v_p_sqr = pow(__pyx_v_p, 2.0); /* "fatiando/gravmag/_polyprism.pyx":28 * p = X1*Y2 - X2*Y1 * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 # <<<<<<<<<<<<<< * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 */ __pyx_v_Qk1 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y1) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X1)); /* "fatiando/gravmag/_polyprism.pyx":29 * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 # <<<<<<<<<<<<<< * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 */ __pyx_v_Qk2 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y2) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X2)); /* "fatiando/gravmag/_polyprism.pyx":30 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 # <<<<<<<<<<<<<< * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) */ __pyx_v_Ak1 = (pow(__pyx_v_X1, 2.0) + pow(__pyx_v_Y1, 2.0)); /* "fatiando/gravmag/_polyprism.pyx":31 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 # <<<<<<<<<<<<<< * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) */ __pyx_v_Ak2 = (pow(__pyx_v_X2, 2.0) + pow(__pyx_v_Y2, 2.0)); /* "fatiando/gravmag/_polyprism.pyx":32 * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) # <<<<<<<<<<<<<< * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) */ __pyx_v_R1k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z1_sqr)); /* "fatiando/gravmag/_polyprism.pyx":33 * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) # <<<<<<<<<<<<<< * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) */ __pyx_v_R1k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z1_sqr)); /* "fatiando/gravmag/_polyprism.pyx":34 * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) # <<<<<<<<<<<<<< * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) */ __pyx_v_R2k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":35 * R1k2 = sqrt(Ak2 +
Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) # <<<<<<<<<<<<<< * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) */ __pyx_v_R2k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":36 * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) # <<<<<<<<<<<<<< * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) */ __pyx_v_Ak1 = sqrt(__pyx_v_Ak1); /* "fatiando/gravmag/_polyprism.pyx":37 * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) # <<<<<<<<<<<<<< * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = sqrt(Qk2**2 + p_sqr) */ __pyx_v_Ak2 = sqrt(__pyx_v_Ak2); /* "fatiando/gravmag/_polyprism.pyx":38 * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) # <<<<<<<<<<<<<< * Bk2 = sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 */ __pyx_v_Bk1 = sqrt((pow(__pyx_v_Qk1, 2.0) + __pyx_v_p_sqr)); /* "fatiando/gravmag/_polyprism.pyx":39 * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = sqrt(Qk2**2 + p_sqr) # <<<<<<<<<<<<<< * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 */ __pyx_v_Bk2 = sqrt((pow(__pyx_v_Qk2, 2.0) + __pyx_v_p_sqr)); /* "fatiando/gravmag/_polyprism.pyx":40 * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 # <<<<<<<<<<<<<< * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 */ __pyx_v_E1k1 = (__pyx_v_R1k1 * __pyx_v_Bk1); /* "fatiando/gravmag/_polyprism.pyx":41 * Bk2 = sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 # <<<<<<<<<<<<<< * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 */ __pyx_v_E1k2 = (__pyx_v_R1k2 * __pyx_v_Bk2); /* "fatiando/gravmag/_polyprism.pyx":42 * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 # <<<<<<<<<<<<<< * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) */ __pyx_v_E2k1 = (__pyx_v_R2k1 * __pyx_v_Bk1); /* "fatiando/gravmag/_polyprism.pyx":43 * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 # <<<<<<<<<<<<<< * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) */ __pyx_v_E2k2 = (__pyx_v_R2k2 * __pyx_v_Bk2);
/* NOTE(review): below, the arctangent (solid-angle) terms of pyx lines 44-46
 * are accumulated into `kernel`; atan2 needs no zero guard, so no GIL/error
 * machinery appears until the log/division terms of pyx lines 50-55. */
/* "fatiando/gravmag/_polyprism.pyx":44 * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) # <<<<<<<<<<<<<< * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) */ __pyx_v_kernel = (__pyx_v_kernel + ((__pyx_v_Z2 - __pyx_v_Z1) * (atan2(__pyx_v_Qk2, __pyx_v_p) - atan2(__pyx_v_Qk1, __pyx_v_p)))); /* "fatiando/gravmag/_polyprism.pyx":45 * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) # <<<<<<<<<<<<<< * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z2 * (atan2((__pyx_v_Z2 * __pyx_v_Qk1), (__pyx_v_R2k1 * __pyx_v_p)) - atan2((__pyx_v_Z2 * __pyx_v_Qk2), (__pyx_v_R2k2 * __pyx_v_p))))); /* "fatiando/gravmag/_polyprism.pyx":46 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) # <<<<<<<<<<<<<< * Ck1 = Qk1*Ak1 * Ck2 = Qk2*Ak2 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z1 * (atan2((__pyx_v_Z1 * __pyx_v_Qk2), (__pyx_v_R1k2 * __pyx_v_p)) - atan2((__pyx_v_Z1 * __pyx_v_Qk1), (__pyx_v_R1k1 * __pyx_v_p))))); /* "fatiando/gravmag/_polyprism.pyx":47 * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 # <<<<<<<<<<<<<< * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors */ __pyx_v_Ck1 = (__pyx_v_Qk1 * __pyx_v_Ak1); /* "fatiando/gravmag/_polyprism.pyx":48 * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 * Ck2 = Qk2*Ak2 # <<<<<<<<<<<<<< * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( */ __pyx_v_Ck2 = (__pyx_v_Qk2 * __pyx_v_Ak2); /* "fatiando/gravmag/_polyprism.pyx":50 * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division
errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( # <<<<<<<<<<<<<< * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_t_1 = (__pyx_v_Bk1 + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":51 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( */ __pyx_t_2 = (__pyx_v_E1k1 - __pyx_v_Ck1); __pyx_t_3 = ((__pyx_v_E1k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":52 * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) # <<<<<<<<<<<<<< * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - */ __pyx_t_4 = (__pyx_v_E2k1 - __pyx_v_Ck1); __pyx_t_5 = ((__pyx_v_E2k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno =
__LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":50 * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( # <<<<<<<<<<<<<< * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak1 / __pyx_t_1)) * (log(((__pyx_t_2 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_4 / __pyx_t_5) + __pyx_v_dummy))))); /* "fatiando/gravmag/_polyprism.pyx":53 * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_t_5 = (__pyx_v_Bk2 + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":54 * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) * return kernel */ __pyx_t_4 = (__pyx_v_E2k2 - __pyx_v_Ck2); __pyx_t_3 = ((__pyx_v_E2k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":55 * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 +
dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) # <<<<<<<<<<<<<< * return kernel * */ __pyx_t_2 = (__pyx_v_E1k2 - __pyx_v_Ck2); __pyx_t_1 = ((__pyx_v_E1k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":53 * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak2 / __pyx_t_5)) * (log(((__pyx_t_4 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy))))); /* "fatiando/gravmag/_polyprism.pyx":56 * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":58 * return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */
/* NOTE(review): kernelxx (pyx line 58 onward) begins here; its generated
 * definition continues beyond this chunk. */
static
CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double 
__pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":65 * aux14, aux15, aux16, n, g, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":66 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":67 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":68 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":69 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":70 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":71 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = 
sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":72 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":73 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":74 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":75 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":76 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif 
PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":77 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":78 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":79 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":80 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":81 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":82 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ 
__pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":83 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":84 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":85 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":86 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":87 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":88 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":89 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, 
"float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":90 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":91 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = (((__pyx_v_g * __pyx_v_Y2) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":92 * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":93 * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":94 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* 
"fatiando/gravmag/_polyprism.pyx":95 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":96 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":97 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_g * __pyx_v_Y1) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":98 * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":99 * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) * aux11 = 
log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":100 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":101 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":102 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":103 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":104 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_n * (__pyx_v_aux15 - __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":105 * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= -aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":106 * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* "fatiando/gravmag/_polyprism.pyx":107 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":108 * res *= -aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":58 * return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxx", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":110 * return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; 
/* NOTE(review): machine-generated by Cython from fatiando/gravmag/_polyprism.pyx
 * (kernelxy, .pyx line 110 onward — see the embedded source markers below).
 * Do not hand-edit this C file; change the .pyx and re-run Cython.
 * Locals below mirror the .pyx `cdef` block: aux* are scratch values, n/g/p/d1/d2
 * are per-edge geometric quantities, R11..R22 are corner distances, and `dummy`
 * is a small offset "used to avoid singularities" (per the .pyx comment). */
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":117 * aux14, aux15, aux16, n, g, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ /* dummy nudges differences/denominators off exact zero (per .pyx comment above) */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":118 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = 
Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":119 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":120 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":121 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":122 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":123 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":124 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":125 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - 
(X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":126 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":127 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":128 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":129 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE 
__pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":130 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":131 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":132 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":133 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":134 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":135 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":136 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* 
"fatiando/gravmag/_polyprism.pyx":137 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":138 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":139 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":140 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":141 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":142 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { 
#ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":143 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y2)) * __pyx_v_aux13) - __pyx_v_aux14); /* "fatiando/gravmag/_polyprism.pyx":144 * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":145 * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":146 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":147 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD 
PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":148 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":149 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y1)) * __pyx_v_aux13) - __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":150 * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":151 * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":152 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = 
log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":153 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":154 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (aux14 - aux15) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":155 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":156 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (aux14 - aux15) # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_aux14 - __pyx_v_aux15)); /* "fatiando/gravmag/_polyprism.pyx":157 * aux15 = aux12 - aux13 * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":158 * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux0); /* "fatiando/gravmag/_polyprism.pyx":159 * aux0 = 
(1.0/(1.0 + (n*n))) * res *= aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":160 * res *= aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":110 * return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":162 * return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
__pyx_v_aux11; /* NOTE(review): machine-generated by Cython from fatiando/gravmag/_polyprism.pyx
 * (kernelxz, .pyx line 162 onward — see the embedded source markers below).
 * Do not hand-edit this C file; regenerate from the .pyx instead.
 * Locals mirror the .pyx `cdef` block; `dummy` is a small offset "used to
 * avoid singularities" (per the .pyx comment). */ __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":169 * aux14, aux15, aux16, n, g, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":170 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":171 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":172 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = 
(aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":173 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":174 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":175 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":176 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":177 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":178 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ 
if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":179 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":180 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":181 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":182 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":183 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":184 * 
aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":185 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":186 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":187 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":188 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) */ __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1); __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":189 * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) */ 
__pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1); __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":190 * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) */ __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2); __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":191 * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) */ __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2); __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + 
__pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":192 * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) # <<<<<<<<<<<<<< * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) */ __pyx_t_1 = (2.0 * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":193 * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) # <<<<<<<<<<<<<< * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 */ __pyx_t_1 = (2.0 * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux15 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":194 * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) # <<<<<<<<<<<<<< * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) */ __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12)); /* "fatiando/gravmag/_polyprism.pyx":195 * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 # <<<<<<<<<<<<<< * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 */ __pyx_v_res = (((__pyx_v_Y2 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16); /* "fatiando/gravmag/_polyprism.pyx":196 * aux16 = aux15*(aux13 - aux12) * res = 
(Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) # <<<<<<<<<<<<<< * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10)); /* "fatiando/gravmag/_polyprism.pyx":197 * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_Y1 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16)); /* "fatiando/gravmag/_polyprism.pyx":198 * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= -aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":199 * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* "fatiando/gravmag/_polyprism.pyx":200 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":201 * res *= -aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":162 * return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double 
Z1_sqr, double Z2_sqr) nogil: * cdef: */
  /* NOTE(review): this file is machine-generated by Cython from
   * fatiando/gravmag/_polyprism.pyx -- fix bugs in the .pyx source and
   * regenerate; do not hand-edit this C. */
  /* function exit code (kernelxz): the body runs nogil, so a
   * ZeroDivisionError raised by a guarded division cannot propagate as a
   * Python exception; it is reported as unraisable (GIL re-acquired inside
   * __Pyx_WriteUnraisable) and the function returns 0. */
  __pyx_L1_error:;
  __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* _polyprism.pyx:203
 *   cdef inline double kernelyy(double X1, double Y1, double X2, double Y2,
 *                               double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil
 * yy-component kernel of the polygonal-prism field, evaluated for the pair
 * of horizontal points (X1,Y1)/(X2,Y2) and depths Z1/Z2 (presumably one
 * polygon edge per call -- confirm against the callers in the .pyx).
 * Z1_sqr/Z2_sqr are unused here (CYTHON_UNUSED).  `dummy` = 1e-10 is added
 * throughout to keep divisions, log() and atan2() away from singularities. */
static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) {
  /* scratch locals generated from the cdef block (pyx:205-209) */
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy;
  double __pyx_r;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* pyx:210-211: dummy = 1e-10 "Used to avoid singularities"; kernel = 0 */
  __pyx_v_dummy = 1e-10;
  __pyx_v_kernel = 0.0;
  /* pyx:212-215: edge deltas, then slope m and intercept c of the line
   * through (X1,Y1)-(X2,Y2) */
  __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy);
  __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy);
  if (unlikely(__pyx_v_aux0 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0);
  __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m));
  /* pyx:216-222: edge length aux2, cross product aux3, and the projected
   * quantities p, d1, d2 (each offset by dummy) */
  __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1)));
  __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1));
  if (unlikely(__pyx_v_aux2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy);
  __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1));
  __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2));
  if (unlikely(__pyx_v_aux2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy);
  if (unlikely(__pyx_v_aux2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy);
  /* pyx:223-230: squared magnitudes and the four vertex distances R11..R22 */
  __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1));
  __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2));
  __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1);
  __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2);
  __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8));
  __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9));
  __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8));
  __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9));
  /* pyx:231-236: arctangent terms at the d2 end of the edge */
  __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22));
  __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21));
  __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11);
  __pyx_t_1 = (__pyx_v_p * __pyx_v_d2);
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1);
  __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12);
  if (unlikely(__pyx_v_d2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2);
  __pyx_v_res = (((__pyx_v_c * __pyx_v_X2) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14));
  /* pyx:237-242: the matching terms at the d1 end, subtracted */
  __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12));
  __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11));
  __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11);
  __pyx_t_1 = (__pyx_v_p * __pyx_v_d1);
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1);
  __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12);
  if (unlikely(__pyx_v_d1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1);
  __pyx_v_res = (__pyx_v_res - (((__pyx_v_c * __pyx_v_X1) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14)));
  /* pyx:243-249: logarithmic terms (dummy keeps log() off zero) */
  __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy));
  __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy));
  __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy));
  __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy));
  __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11);
  __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13);
  __pyx_v_res = (__pyx_v_res + (__pyx_v_m * (__pyx_v_aux15 - __pyx_v_aux14)));
  /* pyx:250-253: scale by 1/(1 + m^2), accumulate, return */
  __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m));
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux1 = (1.0 / __pyx_t_1);
  __pyx_v_res = (__pyx_v_res * __pyx_v_aux1);
  __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res);
  __pyx_r = __pyx_v_kernel;
  goto __pyx_L0;

  /* function exit code: unraisable-error path for the nogil body */
  __pyx_L1_error:;
  __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* _polyprism.pyx:255 -- cdef inline double kernelyz(...) nogil */
static CYTHON_INLINE double
/* yz-component kernel of the polygonal-prism field (generated from
 * fatiando/gravmag/_polyprism.pyx:255; edit the .pyx, not this C).
 * Same edge/depth parameterization as kernelyy above is presumed -- confirm
 * against the .pyx callers.  Z1_sqr/Z2_sqr are unused (CYTHON_UNUSED);
 * dummy = 1e-10 avoids singularities in the divisions and log() calls. */
__pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) {
  /* scratch locals generated from the cdef block (pyx:257-261);
   * note aux3 does not exist in this kernel */
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy;
  double __pyx_r;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* pyx:262-263: dummy = 1e-10 "Used to avoid singularities"; kernel = 0 */
  __pyx_v_dummy = 1e-10;
  __pyx_v_kernel = 0.0;
  /* pyx:264-267: edge deltas, slope m and intercept c */
  __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy);
  __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy);
  if (unlikely(__pyx_v_aux0 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0);
  __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m));
  /* pyx:268-272: edge length aux2 and projected distances d1, d2 */
  __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1)));
  __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1));
  __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2));
  if (unlikely(__pyx_v_aux2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy);
  if (unlikely(__pyx_v_aux2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy);
  /* pyx:273-280: squared magnitudes and the four vertex distances R11..R22 */
  __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1));
  __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2));
  __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1);
  __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2);
  __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8));
  __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9));
  __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8));
  __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9));
  /* pyx:281-284: log((R - d)/(R + d) + dummy) for the four R/d pairs
   * (note __pyx_t_1/__pyx_t_2 swap roles between consecutive lines) */
  __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1);
  __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1);
  if (unlikely(__pyx_t_2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy));
  __pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1);
  __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1);
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy));
  __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2);
  __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2);
  if (unlikely(__pyx_t_2 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy));
  __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2);
  __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2);
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy));
  /* pyx:285-286: half-inverse distances 1/(2*d1), 1/(2*d2) */
  __pyx_t_1 = (2.0 * __pyx_v_d1);
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux14 = (1.0 / __pyx_t_1);
  __pyx_t_1 = (2.0 * __pyx_v_d2);
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux15 = (1.0 / __pyx_t_1);
  /* pyx:287-290: combine log terms; aux16 is reused for the d2 side then
   * the d1 side */
  __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12));
  __pyx_v_res = (((__pyx_v_X2 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16);
  __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10));
  __pyx_v_res = (__pyx_v_res - (((__pyx_v_X1 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16));
  /* pyx:291-294: scale by 1/(1 + m^2), accumulate, return */
  __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m));
  if (unlikely(__pyx_t_1 == 0)) {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    PyErr_SetString(PyExc_ZeroDivisionError, "float division");
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_v_aux1 = (1.0 / __pyx_t_1);
  __pyx_v_res = (__pyx_v_res * __pyx_v_aux1);
  __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res);
  __pyx_r = __pyx_v_kernel;
  goto __pyx_L0;

  /* function exit code: nogil body, so errors are reported as unraisable
   * and 0 is returned */
  __pyx_L1_error:;
  __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* _polyprism.pyx:296 -- cdef inline double kernelzz(...) nogil
 * (zz-component kernel; body continues past this chunk) */
static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) {
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2;
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":302 * aux5, aux6, aux7, aux8, aux9, aux10, aux11, aux12, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":303 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":304 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ __pyx_v_aux0 = 
((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":305 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":306 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":307 * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":308 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":309 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":310 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ 
__pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":311 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":312 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":313 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":314 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":315 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = 
(__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":316 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":317 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":318 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":319 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":320 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":321 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":322 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * res = aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":323 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); 
/* "fatiando/gravmag/_polyprism.pyx":324 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * res = aux12 # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = __pyx_v_aux12; /* "fatiando/gravmag/_polyprism.pyx":325 * aux12 = aux10 - aux11 * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":326 * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * res -= aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":327 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * res -= aux12 * kernel += res */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":328 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * res -= aux12 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res - __pyx_v_aux12); /* "fatiando/gravmag/_polyprism.pyx":329 * aux12 = aux10 - aux11 * res -= aux12 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":330 * res -= aux12 * kernel += res * return kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":296 * return kernel * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelzz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* 
"fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_gz[] = "gz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz = {__Pyx_NAMESTR("gz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_gz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); 
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 
0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} 
__pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_gz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gz", 
0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = 
__pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":344 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 344; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":345 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 345; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":346 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":347 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); 
unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private 
variables to invalid values */ __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":348 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":349 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":350 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":351 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":352 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * 
X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":353 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":354 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":355 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":356 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = 
__LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":357 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":358 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":359 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":360 * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; 
*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Y1; __pyx_parallel_temp1 = __pyx_v_Z2_sqr; __pyx_parallel_temp2 = __pyx_v_Z1_sqr; __pyx_parallel_temp3 = __pyx_v_Y2; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_kp1; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Y1 = __pyx_parallel_temp0; __pyx_v_Z2_sqr = __pyx_parallel_temp1; __pyx_v_Z1_sqr = __pyx_parallel_temp2; __pyx_v_Y2 = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_kp1 = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":346 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx[] = "gxx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx = {__Pyx_NAMESTR("gxx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx, 
/* NOTE(review): Cython-generated code (see the embedded "fatiando/gravmag/_polyprism.pyx"
 * source comments and __pyx_/__Pyx_ prefixes). Do not edit by hand; regenerate from the
 * .pyx instead. The function below is the Python-callable wrapper for gxx(xp, yp, zp, x,
 * y, z1, z2, density, res): it unpacks the 9 required arguments from the args tuple /
 * kwds dict (fall-through switches are intentional), converts z1/z2/density to C double,
 * type-checks the array arguments against numpy.ndarray, and delegates to
 * __pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx. Errors funnel through the
 * __pyx_L3_error / __pyx_L1_error goto ladder, recording __pyx_lineno for tracebacks. */
METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] =
/* Keyword lookup continues: every one of the 9 parameters is required — a miss raises
 * TypeError via __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, <index>). */
PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto
/* Fast path (no kwds): exactly 9 positional arguments are unpacked directly from the
 * tuple; any other count jumps to the argtuple error label. */
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if
/* Arguments unpacked; the remaining array parameters are type-checked against
 * numpy.ndarray before control passes to the typed implementation
 * __pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx (declared after the wrapper's
 * closing brace; its body continues beyond this chunk). */
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 368; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T
__pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount 
= 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":374 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":375 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":376 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":377 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; 
int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_kp1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":378 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":379 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":380 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":381 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":382 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":383 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":384 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, 
__pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":385 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":386 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":387 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
*, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":388 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":389 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":390 * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; 
__Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates1) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_X2; __pyx_parallel_temp1 = __pyx_v_kernel; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_Y1; __pyx_parallel_temp4 = __pyx_v_i; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Y2; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_Z2; __pyx_parallel_temp11 = __pyx_v_kp1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_X2 = __pyx_parallel_temp0; __pyx_v_kernel = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_i = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Y2 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_Z2 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":376 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy[] = "gxy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy = {__Pyx_NAMESTR("gxy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy, 
METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto 
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
__pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount 
= 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":404 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 404; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":405 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":406 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":407 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; 
int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_kp1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":408 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":409 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":410 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":411 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":412 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":413 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":414 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, 
__pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":415 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":416 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":417 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
*, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":418 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":419 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":420 * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; 
__Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates2) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_k; __pyx_parallel_temp1 = __pyx_v_Y1; __pyx_parallel_temp2 = __pyx_v_X2; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Y2; __pyx_parallel_temp5 = __pyx_v_Z2_sqr; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_kp1; __pyx_parallel_temp11 = __pyx_v_i; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_k = __pyx_parallel_temp0; __pyx_v_Y1 = __pyx_parallel_temp1; __pyx_v_X2 = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Y2 = __pyx_parallel_temp4; __pyx_v_Z2_sqr = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_kp1 = __pyx_parallel_temp10; __pyx_v_i = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":406 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
/* NOTE(review): this C file is auto-generated by Cython from
 * fatiando/gravmag/_polyprism.pyx.  Do not hand-edit the logic below;
 * change the .pyx source and regenerate instead.  The comments added
 * here only annotate what the generated code does. */

/* --- tail of the gxy implementation function: shared exit paths --- */
__Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* Error exit: stash the pending exception, release all six PEP-3118
 * buffer views acquired at function entry, then restore the exception
 * so a traceback entry can be appended. */
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
  __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer);
  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
/* Normal exit: release the same six buffer views. */
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "fatiando/gravmag/_polyprism.pyx":424
 * @cython.wraparound(False)
 * @cython.boundscheck(False)
 * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None,             # <<<<<<<<<<<<<<
 *     numpy.ndarray[DTYPE_T, ndim=1] yp not None,
 *     numpy.ndarray[DTYPE_T, ndim=1] zp not None,
 */

/* Python wrapper for gxz(): unpacks the 9 positional/keyword arguments
 * (all mandatory), coerces z1/z2/density to C double, type-checks the
 * ndarray arguments, and dispatches to the implementation function
 * __pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz. */
static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz[] = "gxz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)";
static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz = {__Pyx_NAMESTR("gxz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz)};
static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_xp = 0;
  PyArrayObject *__pyx_v_yp = 0;
  PyArrayObject *__pyx_v_zp = 0;
  PyArrayObject *__pyx_v_x = 0;
  PyArrayObject *__pyx_v_y = 0;
  double __pyx_v_z1;
  double __pyx_v_z2;
  double __pyx_v_density;
  PyArrayObject *__pyx_v_res = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gxz (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0};
    PyObject* values[9] = {0,0,0,0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Gather whatever arguments were passed positionally.
       * The case fall-through is intentional (Cython codegen idiom). */
      switch (pos_args) {
        case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
        case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill the remaining slots from keywords.  Every argument is
       * mandatory: a missing keyword raises via RaiseArgtupleInvalid.
       * Fall-through again intentional: starting at pos_args, each
       * later slot must come from a keyword. */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case 1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 2:
        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 3:
        if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 4:
        if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 5:
        if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 6:
        if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 7:
        if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 8:
        if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
      }
      /* Any keyword left over is unknown (or a duplicate of a
       * positional) -> ParseOptionalKeywords raises. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 9) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: no keywords, exactly 9 positional arguments. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
      values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
      values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
      values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
    }
    /* Bind the collected objects; z1/z2/density are coerced to C double
     * ((double)-1 plus PyErr_Occurred() distinguishes a real -1.0 from
     * a conversion error). */
    __pyx_v_xp = ((PyArrayObject *)values[0]);
    __pyx_v_yp = ((PyArrayObject *)values[1]);
    __pyx_v_zp = ((PyArrayObject *)values[2]);
    __pyx_v_x = ((PyArrayObject *)values[3]);
    __pyx_v_y = ((PyArrayObject *)values[4]);
    __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_res = ((PyArrayObject *)values[8]);
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  /* Wrong number of positional arguments. */
  __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Enforce the "not None" numpy.ndarray declarations from the .pyx
   * signature before entering the implementation. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 426; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 428; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* Dispatch to the gxz implementation function. */
  __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* --- head of the gxz implementation function (continues past this
 * chunk): local declarations mirroring the .pyx cdef block. --- */
static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) {
  unsigned int __pyx_v_nverts;
  CYTHON_UNUSED unsigned int __pyx_v_size;
  unsigned int __pyx_v_i;
  unsigned int __pyx_v_k;
  unsigned int __pyx_v_kp1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T
__pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount 
= 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":434 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":435 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 435; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":436 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":437 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); unsigned int __pyx_parallel_temp5 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; 
int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":438 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":439 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":440 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":441 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":442 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":443 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":444 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = 
((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":445 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":446 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":447 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":448 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":449 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":450 * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); 
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates3) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kernel; __pyx_parallel_temp1 = __pyx_v_Y2; __pyx_parallel_temp2 = __pyx_v_k; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_kp1; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_Y1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kernel = __pyx_parallel_temp0; __pyx_v_Y2 = __pyx_parallel_temp1; __pyx_v_k = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_kp1 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_Y1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":436 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy[] = "gyy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy = {__Pyx_NAMESTR("gyy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy, 
/* NOTE(review): Cython-GENERATED C code (emitted from fatiando/gravmag/_polyprism.pyx,
 * see the embedded ":454" provenance comments). Do not hand-edit — fix the .pyx and
 * regenerate with Cython instead.
 *
 * What follows is the Python-level wrapper for gyy(xp, yp, zp, x, y, z1, z2, density, res):
 * it unpacks the 9 arguments either positionally (fast path: PyTuple_GET_ITEM into values[])
 * or by keyword (PyDict_GetItem against __pyx_pyargnames, falling through the switch so every
 * argument is required), converts z1/z2/density to C doubles via __pyx_PyFloat_AsDouble
 * (checking PyErr_Occurred() because -1 is an ambiguous error sentinel), type-checks the five
 * array arguments against numpy.ndarray, and dispatches to the typed implementation
 * __pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy. All failure paths funnel through
 * __pyx_L3_error, which records a traceback for "fatiando.gravmag._polyprism.gyy"
 * and returns NULL. Comments below were added for review only; code is unchanged. */
METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto 
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
__pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount 
= 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":464 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":465 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 465; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":466 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":467 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; 
int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned 
int)0xbad0bad0); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":468 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":469 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":470 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":471 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":472 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":473 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":474 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, 
__pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":475 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":476 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":477 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
*, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":478 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":479 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":480 * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; 
__Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates4) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_Z1_sqr; __pyx_parallel_temp2 = __pyx_v_i; __pyx_parallel_temp3 = __pyx_v_kernel; __pyx_parallel_temp4 = __pyx_v_Z2; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Y1; __pyx_parallel_temp7 = __pyx_v_X2; __pyx_parallel_temp8 = __pyx_v_X1; __pyx_parallel_temp9 = __pyx_v_Z2_sqr; __pyx_parallel_temp10 = __pyx_v_k; __pyx_parallel_temp11 = __pyx_v_Y2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_Z1_sqr = __pyx_parallel_temp1; __pyx_v_i = __pyx_parallel_temp2; __pyx_v_kernel = __pyx_parallel_temp3; __pyx_v_Z2 = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Y1 = __pyx_parallel_temp6; __pyx_v_X2 = __pyx_parallel_temp7; __pyx_v_X1 = __pyx_parallel_temp8; __pyx_v_Z2_sqr = __pyx_parallel_temp9; __pyx_v_k = __pyx_parallel_temp10; __pyx_v_Y2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":466 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz[] = "gyz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz = {__Pyx_NAMESTR("gyz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz, 
METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto 
__pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 487; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; 
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; 
__pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; 
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":494 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 494; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":495 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 495; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":496 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":497 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; 
int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_k) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":498 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":499 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":500 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":501 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":502 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":503 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":504 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, 
__pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":505 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":506 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":507 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
*, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":508 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":509 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":510 * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; 
__Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates5) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_kp1; __pyx_parallel_temp2 = __pyx_v_Z1; __pyx_parallel_temp3 = __pyx_v_X1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Y1; __pyx_parallel_temp9 = __pyx_v_kernel; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_k; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_kp1 = __pyx_parallel_temp1; __pyx_v_Z1 = __pyx_parallel_temp2; __pyx_v_X1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Y1 = __pyx_parallel_temp8; __pyx_v_kernel = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_k = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":496 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz[] = "gzz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz = {__Pyx_NAMESTR("gzz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz, 
/* NOTE(review): this file is Cython-generated C (from
 * fatiando/gravmag/_polyprism.pyx).  Do not hand-edit logic here — regenerate
 * from the .pyx instead.  Comments below are added for readability only;
 * every code token is unchanged from the generated output. */

/* Tail of the PyMethodDef initializer for "gzz" (the entry begins on the
 * previous line): method flags and the docstring pointer. */
METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz)};

/* Python-level wrapper for gzz(xp, yp, zp, x, y, z1, z2, density, res).
 *
 * Responsibilities (all visible below):
 *   1. Unpack exactly 9 arguments, given positionally and/or by keyword.
 *   2. Convert z1, z2, density to C doubles via __pyx_PyFloat_AsDouble.
 *   3. Type-check the six array arguments against numpy.ndarray.
 *   4. Dispatch to the C implementation
 *      __pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz.
 * Returns the implementation's result, or NULL with a Python exception set
 * on any unpacking/conversion/type error.
 * The __pyx_lineno values (514..520) refer to lines of the original .pyx
 * source, used when building tracebacks. */
static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_xp = 0;
  PyArrayObject *__pyx_v_yp = 0;
  PyArrayObject *__pyx_v_zp = 0;
  PyArrayObject *__pyx_v_x = 0;
  PyArrayObject *__pyx_v_y = 0;
  double __pyx_v_z1;
  double __pyx_v_z2;
  double __pyx_v_density;
  PyArrayObject *__pyx_v_res = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("gzz (wrapper)", 0);
  {
    /* Keyword-name table, in declared parameter order; NULL-terminated. */
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0};
    PyObject* values[9] = {0,0,0,0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Copy whatever positional arguments were given into values[].
       * Cases fall through deliberately (no break) so e.g. 5 positional
       * args populate values[4]..values[0]. */
      switch (pos_args) {
        case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
        case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill the remaining slots from keywords.  Again intentional
       * fallthrough: starting at the first slot not covered positionally,
       * every later parameter must be supplied by keyword (all 9 params
       * are required — error index in the message is the missing slot). */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case 1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 2:
        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 3:
        if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 4:
        if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 5:
        if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 6:
        if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 7:
        if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
        case 8:
        if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--;
        else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} }
      }
      /* Any keyword left over is unexpected/duplicate — let the helper
       * raise the appropriate TypeError. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gzz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 9) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: no keywords, exactly 9 positional arguments. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
      values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
      values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
      values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
    }
    /* Bind unpacked objects to typed locals; scalars go through
     * __pyx_PyFloat_AsDouble, which signals failure by returning -1 with a
     * Python exception pending (hence the PyErr_Occurred() check). */
    __pyx_v_xp = ((PyArrayObject *)values[0]);
    __pyx_v_yp = ((PyArrayObject *)values[1]);
    __pyx_v_zp = ((PyArrayObject *)values[2]);
    __pyx_v_x = ((PyArrayObject *)values[3]);
    __pyx_v_y = ((PyArrayObject *)values[4]);
    __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_res = ((PyArrayObject *)values[8]);
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  /* Wrong number of positional arguments. */
  __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  /* Unpacking failed: attach traceback info and bail out before any
   * buffers were acquired. */
  __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Enforce "not None" numpy.ndarray typing on every array argument
   * (third argument 0 = None not accepted). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* All arguments validated — call the C-level implementation. */
  __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* C-level implementation of gzz: declarations only begin here; the body
 * continues beyond this chunk.  Mirrors the gyz implementation above:
 * presumably iterates the observation points in an OpenMP prange and sums
 * per-edge kernel contributions into res — confirm against the full
 * generated function. */
static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) {
  unsigned int __pyx_v_nverts;
  CYTHON_UNUSED unsigned int __pyx_v_size;
  unsigned int __pyx_v_i;
  unsigned int __pyx_v_k;
  unsigned int __pyx_v_kp1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1;
  __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1;
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gzz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; 
__pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; 
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":524 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":525 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":526 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":527 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; 
int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z2_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":528 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":529 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":530 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":531 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":532 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":533 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":534 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = 
((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":535 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":536 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":537 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":538 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":539 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":540 * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); 
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates6) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_k; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_i; __pyx_parallel_temp4 = __pyx_v_X1; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_Z1; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_Z2; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_k = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_i = __pyx_parallel_temp3; __pyx_v_X1 = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_Z1 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_Z2 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":526 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_14tf[] = "tf(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, double fx, double fy, double fz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf = {__Pyx_NAMESTR("tf"), 
(PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_14tf)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; double __pyx_v_fx; double __pyx_v_fy; double __pyx_v_fz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("tf (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_fx,&__pyx_n_s_fy,&__pyx_n_s_fz,&__pyx_n_s_res,0}; PyObject* values[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: 
goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 13: if (likely((values[13] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 13); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 14) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = 
PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); values[13] = PyTuple_GET_ITEM(__pyx_args, 13); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fx = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_fx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fy = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_fy == (double)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fz = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_fz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[13]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 545; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 547; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 551; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = 
__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_fx, __pyx_v_fy, __pyx_v_fz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND 
__pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("tf", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":556 * DTYPE_T v1, v2, v3, v4, v5, v6 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 556; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":557 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":558 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in 
prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":559 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); unsigned int __pyx_parallel_temp13 = 0xbad0bad0; unsigned int __pyx_parallel_temp14 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp15 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp16 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || 
(__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v5) reduction(+:__pyx_v_v6) reduction(+:__pyx_v_v1) reduction(+:__pyx_v_v4) reduction(+:__pyx_v_v3) reduction(+:__pyx_v_v2) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_Z2_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 
= ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":560 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":561 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":562 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":563 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v1 = 0 * v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":564 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v1 = 0 # <<<<<<<<<<<<<< * v2 = 0 * v3 = 0 */ __pyx_v_v1 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":565 * Z2_sqr = Z2**2 * v1 = 0 * v2 = 0 # <<<<<<<<<<<<<< * v3 = 0 * v4 = 0 */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":566 * v1 = 0 * v2 = 0 * v3 = 0 # <<<<<<<<<<<<<< * v4 = 0 * v5 = 0 */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":567 * v2 = 0 * v3 = 0 * v4 = 0 # <<<<<<<<<<<<<< * v5 = 0 * v6 = 0 */ __pyx_v_v4 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":568 * v3 = 0 * v4 = 0 * v5 = 0 # <<<<<<<<<<<<<< * v6 = 0 * for k in range(nverts): */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":569 * v4 = 0 * v5 = 0 * v6 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* 
"fatiando/gravmag/_polyprism.pyx":570 * v5 = 0 * v6 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":571 * v6 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":572 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":573 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = 
__Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":574 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":575 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":576 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":577 * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, 
Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":578 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":579 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":580 * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (fx*(v1*mx + v2*my + v3*mz) */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":581 * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += 
kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (fx*(v1*mx + v2*my + v3*mz) * + fy*(v2*mx + v4*my + v5*mz) */ __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":582 * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (fx*(v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * + fy*(v2*mx + v4*my + v5*mz) * + fz*(v3*mx + v5*my + v6*mz)) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_fx * (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz))) + (__pyx_v_fy * (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)))) + (__pyx_v_fz * (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)))); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates7) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_v5; __pyx_parallel_temp2 = __pyx_v_v6; __pyx_parallel_temp3 = __pyx_v_Y1; __pyx_parallel_temp4 = __pyx_v_v1; 
__pyx_parallel_temp5 = __pyx_v_v4; __pyx_parallel_temp6 = __pyx_v_v3; __pyx_parallel_temp7 = __pyx_v_Z2_sqr; __pyx_parallel_temp8 = __pyx_v_i; __pyx_parallel_temp9 = __pyx_v_v2; __pyx_parallel_temp10 = __pyx_v_Z1_sqr; __pyx_parallel_temp11 = __pyx_v_X1; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_kp1; __pyx_parallel_temp14 = __pyx_v_k; __pyx_parallel_temp15 = __pyx_v_X2; __pyx_parallel_temp16 = __pyx_v_Z1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_v5 = __pyx_parallel_temp1; __pyx_v_v6 = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_v1 = __pyx_parallel_temp4; __pyx_v_v4 = __pyx_parallel_temp5; __pyx_v_v3 = __pyx_parallel_temp6; __pyx_v_Z2_sqr = __pyx_parallel_temp7; __pyx_v_i = __pyx_parallel_temp8; __pyx_v_v2 = __pyx_parallel_temp9; __pyx_v_Z1_sqr = __pyx_parallel_temp10; __pyx_v_X1 = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_kp1 = __pyx_parallel_temp13; __pyx_v_k = __pyx_parallel_temp14; __pyx_v_X2 = __pyx_parallel_temp15; __pyx_v_Z1 = __pyx_parallel_temp16; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = 
__pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":558 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); 
/*
 * NOTE(review): This file is C code generated by Cython from
 * fatiando/gravmag/_polyprism.pyx. Do not hand-edit the logic here —
 * change the .pyx source and regenerate. The comments below were added
 * for readability only; all code tokens are unchanged.
 */
/* Tail of the tf() exit path: release the remaining NumPy buffer views
 * acquired at function entry, then return (success and error paths both
 * converge on __pyx_L2). */
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "fatiando/gravmag/_polyprism.pyx":588
 * @cython.wraparound(False)
 * @cython.boundscheck(False)
 * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None,             # <<<<<<<<<<<<<<
 *        numpy.ndarray[DTYPE_T, ndim=1] yp not None,
 *        numpy.ndarray[DTYPE_T, ndim=1] zp not None,
 */

/* Python wrapper for bx(): parses the 11 positional/keyword arguments,
 * converts them to C values, type-checks the ndarray arguments, and then
 * calls the real implementation __pyx_pf_..._16bx. */
static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_8fatiando_7gravmag_10_polyprism_16bx[] = "bx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)";
static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx = {__Pyx_NAMESTR("bx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_16bx)};
static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_xp = 0;
  PyArrayObject *__pyx_v_yp = 0;
  PyArrayObject *__pyx_v_zp = 0;
  PyArrayObject *__pyx_v_x = 0;
  PyArrayObject *__pyx_v_y = 0;
  double __pyx_v_z1;
  double __pyx_v_z2;
  double __pyx_v_mx;
  double __pyx_v_my;
  double __pyx_v_mz;
  PyArrayObject *__pyx_v_res = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("bx (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0};
    PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Intentional fallthrough on every case: copies however many
       * positional arguments were supplied into values[]. */
      switch (pos_args) {
        case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
        case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
        case  9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
        case  8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
        case  7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
        case  6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Intentional fallthrough again: starting at the first slot NOT
       * filled positionally, every remaining argument is required and must
       * be present as a keyword; a missing one raises TypeError via
       * __Pyx_RaiseArgtupleInvalid. */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case  1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  2:
        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  3:
        if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  4:
        if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  5:
        if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  6:
        if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  7:
        if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  8:
        if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  9:
        if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case 10:
        if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
      }
      /* Any keyword left over at this point is unexpected -> TypeError. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 11) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: no keywords, exactly 11 positional arguments. */
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
      values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
      values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
      values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
      values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
      values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
      values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
    }
    /* Convert the collected PyObject* slots to typed C locals; the double
     * conversions can fail (e.g. non-numeric input), hence the
     * PyErr_Occurred checks on the -1 sentinel. */
    __pyx_v_xp = ((PyArrayObject *)values[0]);
    __pyx_v_yp = ((PyArrayObject *)values[1]);
    __pyx_v_zp = ((PyArrayObject *)values[2]);
    __pyx_v_x = ((PyArrayObject *)values[3]);
    __pyx_v_y = ((PyArrayObject *)values[4]);
    __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_res = ((PyArrayObject *)values[10]);
  }
  goto __pyx_L4_argument_unpacking_done;
  /* Wrong number of positional arguments. */
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  /* Common argument-parsing failure exit: record traceback, return NULL. */
  __pyx_L3_error:;
  __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* Enforce the "not None" ndarray declarations from the .pyx signature. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* Delegate to the generated implementation function. */
  __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res);

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of bx(); its body continues beyond this point. */
static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) {
  unsigned int __pyx_v_nverts;
  CYTHON_UNUSED unsigned int __pyx_v_size;
  unsigned int
__pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; 
__pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| 
PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* 
"fatiando/gravmag/_polyprism.pyx":600 * DTYPE_T v1, v2, v3 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":601 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 601; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":602 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":603 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); 
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v1) reduction(+:__pyx_v_v2) reduction(+:__pyx_v_v3) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2_sqr = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":604 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":605 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":606 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":607 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v1 = 0 * v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":608 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v1 = 0 # <<<<<<<<<<<<<< * v2 = 0 * v3 = 0 */ __pyx_v_v1 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":609 * Z2_sqr = Z2**2 * v1 = 0 
* v2 = 0 # <<<<<<<<<<<<<< * v3 = 0 * for k in range(nverts): */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":610 * v1 = 0 * v2 = 0 * v3 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":611 * v2 = 0 * v3 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":612 * v3 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":613 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":614 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif 
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":615 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":616 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":617 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, 
__pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":618 * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v1*mx + v2*my + v3*mz) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":619 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v1*mx + v2*my + v3*mz) * */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":620 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD 
PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates8) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2_sqr; __pyx_parallel_temp1 = __pyx_v_v1; __pyx_parallel_temp2 = __pyx_v_Y2; __pyx_parallel_temp3 = __pyx_v_Z1_sqr; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_X1; __pyx_parallel_temp6 = __pyx_v_i; __pyx_parallel_temp7 = __pyx_v_v2; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_v3; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_kp1; __pyx_parallel_temp12 = __pyx_v_Z1; __pyx_parallel_temp13 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2_sqr = __pyx_parallel_temp0; __pyx_v_v1 = __pyx_parallel_temp1; __pyx_v_Y2 = __pyx_parallel_temp2; __pyx_v_Z1_sqr = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_X1 = __pyx_parallel_temp5; __pyx_v_i = __pyx_parallel_temp6; __pyx_v_v2 = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_v3 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; __pyx_v_Z1 = __pyx_parallel_temp12; __pyx_v_X2 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":602 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp 
not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_18by[] = "by(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_19by = {__Pyx_NAMESTR("by"), 
(PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_19by, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_18by)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("by (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) 
kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "by") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 625; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 631; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_18by(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND 
__pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("by", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } 
__pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":636 * DTYPE_T v2, v4, v5 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":637 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":638 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* 
"fatiando/gravmag/_polyprism.pyx":639 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); unsigned int __pyx_parallel_temp7 = 0xbad0bad0; unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v4) reduction(+:__pyx_v_v5) reduction(+:__pyx_v_v2) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, 
__pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_X1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":640 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* 
"fatiando/gravmag/_polyprism.pyx":641 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":642 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v2 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":643 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v2 = 0 * v4 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":644 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v2 = 0 # <<<<<<<<<<<<<< * v4 = 0 * v5 = 0 */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":645 * Z2_sqr = Z2**2 * v2 = 0 * v4 = 0 # <<<<<<<<<<<<<< * v5 = 0 * for k in range(nverts): */ __pyx_v_v4 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":646 * v2 = 0 * v4 = 0 * v5 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":647 * v4 = 0 * v5 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":648 * v5 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* 
"fatiando/gravmag/_polyprism.pyx":649 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":650 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":651 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":652 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, 
Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":653 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":654 * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v2*mx + v4*my + v5*mz) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":655 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v2*mx + v4*my + v5*mz) * */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":656 * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, 
Z2, Z1_sqr, Z2_sqr) * res[i] += (v2*mx + v4*my + v5*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates9) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z1; __pyx_parallel_temp1 = __pyx_v_Z2; __pyx_parallel_temp2 = __pyx_v_v4; __pyx_parallel_temp3 = __pyx_v_v5; __pyx_parallel_temp4 = __pyx_v_v2; __pyx_parallel_temp5 = __pyx_v_X2; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_kp1; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_Y1; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_Z2_sqr; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_X1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) 
{ /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z1 = __pyx_parallel_temp0; __pyx_v_Z2 = __pyx_parallel_temp1; __pyx_v_v4 = __pyx_parallel_temp2; __pyx_v_v5 = __pyx_parallel_temp3; __pyx_v_v2 = __pyx_parallel_temp4; __pyx_v_X2 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_kp1 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_Y1 = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_Z2_sqr = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_X1 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":638 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not 
None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_20bz[] = "bz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, 
ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz = {__Pyx_NAMESTR("bz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_20bz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) 
kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; 
goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = 
__pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 664; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; 
__Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":672 * DTYPE_T v3, v5, v6 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 672; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":673 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":674 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = 
z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":675 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v3) reduction(+:__pyx_v_v6) 
reduction(+:__pyx_v_v5) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Y1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":676 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":677 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":678 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v3 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":679 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v3 = 0 * v5 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":680 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v3 = 0 # <<<<<<<<<<<<<< * v5 = 0 * v6 = 0 */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":681 * Z2_sqr = Z2**2 * v3 = 0 * v5 = 0 # <<<<<<<<<<<<<< * v6 = 0 * for k in range(nverts): */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":682 * v3 = 0 * v5 = 0 * v6 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":683 * v5 = 0 * v6 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":684 * v6 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":685 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":686 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 686; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":687 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, 
__pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":688 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":689 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":690 * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v3*mx + v5*my + v6*mz) */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":691 * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v3*mx + v5*my + v6*mz) */ __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, 
__pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":692 * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v3*mx + v5*my + v6*mz) # <<<<<<<<<<<<<< */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates10) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_v3; __pyx_parallel_temp1 = __pyx_v_X2; __pyx_parallel_temp2 = __pyx_v_Z1_sqr; __pyx_parallel_temp3 = __pyx_v_kp1; __pyx_parallel_temp4 = __pyx_v_X1; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Z2_sqr; __pyx_parallel_temp7 = __pyx_v_v6; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_Z2; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_v5; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_Y1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif 
#endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_v3 = __pyx_parallel_temp0; __pyx_v_X2 = __pyx_parallel_temp1; __pyx_v_Z1_sqr = __pyx_parallel_temp2; __pyx_v_kp1 = __pyx_parallel_temp3; __pyx_v_X1 = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Z2_sqr = __pyx_parallel_temp6; __pyx_v_v6 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_Z2 = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_v5 = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_Y1 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":674 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS 
#endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/
/* Python wrapper */
/* NOTE(review): this is Cython-GENERATED C (emitted from _polyprism.pyx and
 * numpy's __init__.pxd). Do not hand-edit the logic here — regenerate from
 * the .pyx sources instead. Comments below are reviewer annotations only;
 * every non-comment token on this line is unchanged. */
/* Thin CPython-level entry point for ndarray.__getbuffer__ (PEP 3118 buffer
 * protocol): sets up refnanny bookkeeping, casts the generic PyObject* self
 * to PyArrayObject*, and forwards to the real implementation below. */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  /* Delegate to the implementation; the int return code (0 = success,
   * negative = error with a Python exception set) is passed through. */
  __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of the buffer-protocol getbuffer for numpy.ndarray
 * (generated from Cython/Includes/numpy/__init__.pxd). Fills *info
 * (buf/ndim/shape/strides/format/...) for PEP-3118 consumers.
 * NOTE(review): this definition continues well past this line; only its
 * head (locals + the first two statements) is annotated here. */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_v_copy_shape;
  int __pyx_v_i;
  int __pyx_v_ndim;
  int __pyx_v_endian_detector;
  int __pyx_v_little_endian;
  int __pyx_v_t;
  char *__pyx_v_f;
  PyArray_Descr *__pyx_v_descr = 0;
  int __pyx_v_offset;
  int __pyx_v_hasfields;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  char *__pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getbuffer__", 0);
  /* Pre-set info->obj to None (with a reference held) so that error paths
   * and __releasebuffer__ always see a valid object pointer. */
  if (__pyx_v_info != NULL) {
    __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(__pyx_v_info->obj);
  }
  /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200
 * # of flags
 *
 * if info == NULL: return             # <<<<<<<<<<<<<<
 *
 * cdef int copy_shape, i, ndim
 */
  /* A NULL info pointer means nothing to fill in: succeed immediately. */
  __pyx_t_1 = ((__pyx_v_info == NULL) != 0);
  if (__pyx_t_1) {
    __pyx_r = 0;
    goto __pyx_L0;
  }
  /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
 *
 * cdef int copy_shape, i, ndim
 * cdef int endian_detector = 1        # <<<<<<<<<<<<<<
 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
 *
 */
  /* Sentinel used just below (next source line) to detect host endianness
   * by inspecting the first byte of an int set to 1. */
  __pyx_v_endian_detector = 1;
  /*
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * * if ((flags & 
pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_2 = (__pyx_v_copy_shape != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape[i] = 
PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = 
self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = 
((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0); if (__pyx_t_1) { __pyx_t_2 = (__pyx_v_little_endian != 0); } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_1) { __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ switch (__pyx_v_t) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ case NPY_BYTE: __pyx_v_f = __pyx_k_b; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = __pyx_k_B; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = __pyx_k_h; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = __pyx_k_H; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * 
elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = __pyx_k_i; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = __pyx_k_I; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = __pyx_k_l; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = __pyx_k_L; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = __pyx_k_q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = __pyx_k_Q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * 
elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = __pyx_k_f; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = __pyx_k_d; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = __pyx_k_g; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = __pyx_k_Zf; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = __pyx_k_Zd; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = __pyx_k_Zg; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in 
numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = __pyx_k_O; break; default: /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} break; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = 
<char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
 * # experimental exception made for __getbuffer__ and __releasebuffer__
 * # -- the details of this may change.
 * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
 * # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fullfill the PEP.
 */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;

  /* Error path: release temporaries, record a traceback entry, and clear
     the reference held in info->obj so the caller never sees a
     half-initialized Py_buffer.
     NOTE(review): this file is Cython-generated output; fix defects in
     the originating .pyx/.pxd source, not here. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
    __Pyx_GOTREF(__pyx_v_info->obj);
    __Pyx_DECREF(__pyx_v_info->obj);
    __pyx_v_info->obj = NULL;
  }
  goto __pyx_L2;

  /* Success path: a Py_None placeholder in info->obj means "no owner";
     drop it before returning so the buffer does not pin None. */
  __pyx_L0:;
  if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
    __Pyx_GOTREF(Py_None);
    __Pyx_DECREF(Py_None);
    __pyx_v_info->obj = NULL;
  }
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_descr);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
 * f[0] = c'\0' # Terminate format string
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format)
 */

/* Python wrapper for ndarray.__releasebuffer__: forwards to the typed
   implementation below.  Release slots cannot raise, so there is no
   error handling here. */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
  __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* __releasebuffer__ implementation: frees the heap blocks that the
   matching __getbuffer__ allocated -- the format string (only malloc'ed
   for structured, field-bearing dtypes) and the strides block (only
   malloc'ed when npy_intp and Py_ssize_t differ in size; per the .pxd
   comment below, info.shape lives in the same block as info.strides). */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__releasebuffer__", 0);

  /* ":289  if PyArray_HASFIELDS(self): */
  __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
  if (__pyx_t_1) {
    /* ":290  stdlib.free(info.format) */
    free(__pyx_v_info->format);
    goto __pyx_L3;
  }
  __pyx_L3:;

  /* ":291  if sizeof(npy_intp) != sizeof(Py_ssize_t): */
  __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
  if (__pyx_t_1) {
    /* ":292  stdlib.free(info.strides)
       # info.shape was stored after info.strides in the same block */
    free(__pyx_v_info->strides);
    goto __pyx_L4;
  }
  __pyx_L4:;

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
 * ctypedef npy_cdouble complex_t
 *
 * cdef inline object PyArray_MultiIterNew1(a): #
<<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 */

/* PyArray_MultiIterNew1 -- build a numpy multi-iterator (broadcast)
   object over one operand.  Returns a new reference on success; on
   failure records a traceback entry and returns 0 (NULL).
   NOTE(review): Cython-generated from numpy/__init__.pxd; the five
   MultiIterNewN helpers below differ only in operand count.  Fix
   defects in the .pxd, not in this file. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);

  /* ":769  return PyArray_MultiIterNew(1, <void*>a) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a));
  if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 */

/* PyArray_MultiIterNew2 -- two-operand variant; same contract as
   MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);

  /* ":772  return PyArray_MultiIterNew(2, <void*>a, <void*>b) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b));
  if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 */

/* PyArray_MultiIterNew3 -- three-operand variant; same contract as
   MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);

  /* ":775  return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c));
  if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 */

/* PyArray_MultiIterNew4 -- four-operand variant; same contract as
   MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);

  /* ":778  return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d));
  if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
 *
 */

/* PyArray_MultiIterNew5 -- five-operand variant; same contract as
   MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);

  /* ":781  return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e));
  if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
 *
 * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
 * # Recursive utility function used in __getbuffer__ to get format
 * # string. The new location in the format string is returned.
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if 
CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } 
#if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0); if (__pyx_t_6) { __pyx_t_7 = (__pyx_v_little_endian != 0); } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_6) { __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 
#"H" */ __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * 
elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == 
NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 
#"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == 
NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; 
__pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno 
= 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L11:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * 
else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L9; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_11; } __pyx_L9:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = 
Py_None; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif __Pyx_NAMESTR("_polyprism"), __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_X1, __pyx_k_X1, sizeof(__pyx_k_X1), 0, 0, 1, 1}, {&__pyx_n_s_X2, __pyx_k_X2, 
sizeof(__pyx_k_X2), 0, 0, 1, 1}, {&__pyx_n_s_Y1, __pyx_k_Y1, sizeof(__pyx_k_Y1), 0, 0, 1, 1}, {&__pyx_n_s_Y2, __pyx_k_Y2, sizeof(__pyx_k_Y2), 0, 0, 1, 1}, {&__pyx_n_s_Z1, __pyx_k_Z1, sizeof(__pyx_k_Z1), 0, 0, 1, 1}, {&__pyx_n_s_Z1_sqr, __pyx_k_Z1_sqr, sizeof(__pyx_k_Z1_sqr), 0, 0, 1, 1}, {&__pyx_n_s_Z2, __pyx_k_Z2, sizeof(__pyx_k_Z2), 0, 0, 1, 1}, {&__pyx_n_s_Z2_sqr, __pyx_k_Z2_sqr, sizeof(__pyx_k_Z2_sqr), 0, 0, 1, 1}, {&__pyx_n_s_bx, __pyx_k_bx, sizeof(__pyx_k_bx), 0, 0, 1, 1}, {&__pyx_n_s_by, __pyx_k_by, sizeof(__pyx_k_by), 0, 0, 1, 1}, {&__pyx_n_s_bz, __pyx_k_bz, sizeof(__pyx_k_bz), 0, 0, 1, 1}, {&__pyx_n_s_density, __pyx_k_density, sizeof(__pyx_k_density), 0, 0, 1, 1}, {&__pyx_n_s_fatiando_gravmag__polyprism, __pyx_k_fatiando_gravmag__polyprism, sizeof(__pyx_k_fatiando_gravmag__polyprism), 0, 0, 1, 1}, {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, {&__pyx_n_s_fx, __pyx_k_fx, sizeof(__pyx_k_fx), 0, 0, 1, 1}, {&__pyx_n_s_fy, __pyx_k_fy, sizeof(__pyx_k_fy), 0, 0, 1, 1}, {&__pyx_n_s_fz, __pyx_k_fz, sizeof(__pyx_k_fz), 0, 0, 1, 1}, {&__pyx_n_s_gxx, __pyx_k_gxx, sizeof(__pyx_k_gxx), 0, 0, 1, 1}, {&__pyx_n_s_gxy, __pyx_k_gxy, sizeof(__pyx_k_gxy), 0, 0, 1, 1}, {&__pyx_n_s_gxz, __pyx_k_gxz, sizeof(__pyx_k_gxz), 0, 0, 1, 1}, {&__pyx_n_s_gyy, __pyx_k_gyy, sizeof(__pyx_k_gyy), 0, 0, 1, 1}, {&__pyx_n_s_gyz, __pyx_k_gyz, sizeof(__pyx_k_gyz), 0, 0, 1, 1}, {&__pyx_n_s_gz, __pyx_k_gz, sizeof(__pyx_k_gz), 0, 0, 1, 1}, {&__pyx_n_s_gzz, __pyx_k_gzz, sizeof(__pyx_k_gzz), 0, 0, 1, 1}, {&__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_k_home_leo_src_fatiando_fatiando, sizeof(__pyx_k_home_leo_src_fatiando_fatiando), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_kernel, __pyx_k_kernel, sizeof(__pyx_k_kernel), 0, 0, 1, 1}, {&__pyx_n_s_kp1, __pyx_k_kp1, sizeof(__pyx_k_kp1), 0, 0, 1, 1}, 
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mx, __pyx_k_mx, sizeof(__pyx_k_mx), 0, 0, 1, 1}, {&__pyx_n_s_my, __pyx_k_my, sizeof(__pyx_k_my), 0, 0, 1, 1}, {&__pyx_n_s_mz, __pyx_k_mz, sizeof(__pyx_k_mz), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_nverts, __pyx_k_nverts, sizeof(__pyx_k_nverts), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_tf, __pyx_k_tf, sizeof(__pyx_k_tf), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_v1, __pyx_k_v1, sizeof(__pyx_k_v1), 0, 0, 1, 1}, {&__pyx_n_s_v2, __pyx_k_v2, sizeof(__pyx_k_v2), 0, 0, 1, 1}, {&__pyx_n_s_v3, __pyx_k_v3, sizeof(__pyx_k_v3), 0, 0, 1, 1}, {&__pyx_n_s_v4, __pyx_k_v4, sizeof(__pyx_k_v4), 0, 0, 1, 1}, {&__pyx_n_s_v5, __pyx_k_v5, sizeof(__pyx_k_v5), 0, 0, 1, 1}, {&__pyx_n_s_v6, __pyx_k_v6, sizeof(__pyx_k_v6), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xp, __pyx_k_xp, sizeof(__pyx_k_xp), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_yp, __pyx_k_yp, sizeof(__pyx_k_yp), 0, 0, 1, 1}, {&__pyx_n_s_z1, __pyx_k_z1, 
sizeof(__pyx_k_z1), 0, 0, 1, 1}, {&__pyx_n_s_z2, __pyx_k_z2, sizeof(__pyx_k_z2), 0, 0, 1, 1}, {&__pyx_n_s_zp, __pyx_k_zp, sizeof(__pyx_k_zp), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, 
__pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__7 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gz, 334, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, 
ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__9 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxx, 364, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__11 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, 
__pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxy, 394, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__13 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxz, 424, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__15 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, 
__pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyy, 454, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__17 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyz, 484, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__19 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gzz, 514, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__21 = PyTuple_Pack(33, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_fx, __pyx_n_s_fy, __pyx_n_s_fz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, 
__pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(14, 0, 33, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_tf, 544, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__23 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bx, 588, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def 
by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__25 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v2, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_by, 624, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__27 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v3, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bz, 660, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_polyprism(void); /*proto*/ PyMODINIT_FUNC init_polyprism(void) #else PyMODINIT_FUNC PyInit__polyprism(void); /*proto*/ PyMODINIT_FUNC PyInit__polyprism(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__polyprism(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_polyprism"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. 
---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_fatiando__gravmag___polyprism) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "fatiando.gravmag._polyprism")) { if (unlikely(PyDict_SetItemString(modules, "fatiando.gravmag._polyprism", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 
0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "fatiando/gravmag/_polyprism.pyx":6 * A pure python implementation is in _polyprism_numpy.py * """ * import numpy # <<<<<<<<<<<<<< * * from libc.math cimport log, atan2, sqrt */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "fatiando/gravmag/_polyprism.pyx":15 * from cython.parallel cimport prange, parallel * * DTYPE = numpy.float # <<<<<<<<<<<<<< * ctypedef numpy.float_t DTYPE_T * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if 
(unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gzz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * 
numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_tf, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_19by, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_by, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* 
"fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":1 * #cython: embedsignature=True # <<<<<<<<<<<<<< * """ * This is a Cython implementation of the potential fields of a polygonal prism. */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init fatiando.gravmag._polyprism", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init fatiando.gravmag._polyprism"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static 
__Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); 
PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname 
= argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if 
(unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': 
return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; 
} } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; /* not a 'break' in the loop */ } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if 
(__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == 
-1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { long r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; #endif result = (*call)(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 Py_LeaveRecursiveCall(); #endif if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) 
((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a 
subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (result) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *getbuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer); if (getbuffer_cobj) { getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } } #endif PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { 
__pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *releasebuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer); if (releasebuffer_cobj) { releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, 
py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { const unsigned int neg_one = (unsigned int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(unsigned int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(unsigned int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(unsigned int), little, !is_unsigned); } } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \ { \ func_type value = func(x); \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ PyErr_SetString(PyExc_OverflowError, \ (is_unsigned && unlikely(value < zero)) ? 
\ "can't convert negative value to " #target_type : \ "value too large to convert to " #target_type); \ return (target_type) -1; \ } \ } \ return (target_type) value; \ } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { const unsigned int neg_one = (unsigned int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } return (unsigned int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(unsigned int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(unsigned int) <= sizeof(long)) { 
__PYX_VERIFY_RETURN_INT(unsigned int, long, PyLong_AsLong) } else if (sizeof(unsigned int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int) -1; } } else { unsigned int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned int) -1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex 
b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = 
atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } 
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= 
sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong) } else if (sizeof(int) <= sizeof(long long)) { 
__PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if 
PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(long) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(long) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong) } else if (sizeof(long) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char 
*)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type 
object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, 
PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } 
else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; 
c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/ *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else /* PY_VERSION_HEX < 0x03030000 */ if (PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_DATA_SIZE(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ return PyUnicode_AsUTF8AndSize(o, length); #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ #endif /* PY_VERSION_HEX < 0x03030000 */ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ #if !CYTHON_COMPILING_IN_PYPY #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, 
Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) return PyInt_AS_LONG(b); #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS switch (Py_SIZE(b)) { case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0]; case 0: return 0; case 1: return ((PyLongObject*)b)->ob_digit[0]; } #endif #endif #if PY_VERSION_HEX < 0x02060000 return PyInt_AsSsize_t(b); #else return PyLong_AsSsize_t(b); #endif } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } #endif /* Py_PYTHON_H */
betweennessCentrality.c
#include "defs.h"

/*
 * betweennessCentrality: approximate betweenness centrality (HPCS SSCA#2
 * Kernel 4) using Brandes' algorithm from 2^K4approx sampled source
 * vertices. BC[v] accumulates the dependency scores; the function returns
 * the elapsed wall-clock time of the timed section.
 *
 * Parallelization (when _OPENMP): one parallel region for the whole
 * computation; level-synchronous BFS per source with per-vertex locks
 * (vLock) guarding d/sig/P updates, per-thread local stacks (myS) merged
 * into the shared stack S via a prefix sum over psCount each phase.
 * NOTE(review): correctness depends on the exact barrier/lock ordering
 * below — code intentionally left untouched, comments only.
 */
double betweennessCentrality(graph* G, DOUBLE_T* BC) {
VERT_T *S; /* stack of vertices in the order of non-decreasing
  distance from s. Also used to implicitly
  represent the BFS queue */
plist* P; /* predecessors of a vertex v on shortest paths from s */
DOUBLE_T* sig; /* No. of shortest paths */
LONG_T* d; /* Length of the shortest path between every pair */
DOUBLE_T* del; /* dependency of vertices */
LONG_T *in_degree, *numEdges, *pSums;
LONG_T *pListMem;            /* single backing array for all P[i].list slices */
LONG_T* Srcs;                /* permuted order in which sources are visited */
LONG_T *start, *end;         /* S[start[ph]..end[ph]) = frontier of BFS phase ph */
LONG_T MAX_NUM_PHASES;
LONG_T *psCount;             /* per-thread frontier counts, prefix-summed */
#ifdef _OPENMP
omp_lock_t* vLock;           /* one lock per vertex */
LONG_T chunkSize;
#endif
int seed = 2387;             /* fixed sprng seed for reproducibility */
double elapsed_time;
#ifdef _OPENMP
#pragma omp parallel
{
#endif
VERT_T *myS, *myS_t;         /* thread-local discovered-vertex stack (+ resize temp) */
LONG_T myS_size;
LONG_T i, j, k, p, count, myCount;
LONG_T v, w, vert;
LONG_T numV, num_traversals, n, m, phase_num;
LONG_T tid, nthreads;
int* stream;                 /* per-thread sprng RNG stream */
#ifdef DIAGNOSTIC
double elapsed_time_part;
#endif
#ifdef _OPENMP
int myLock;
tid = omp_get_thread_num();
nthreads = omp_get_num_threads();
#else
tid = 0;
nthreads = 1;
#endif
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds();
}
#endif
/* numV: no. of vertices to run BFS from = 2^K4approx */
numV = 1<<K4approx;
n = G->n;
m = G->m;
/* Permute vertices */
if (tid == 0) {
Srcs = (LONG_T *) malloc(n*sizeof(LONG_T));
#ifdef _OPENMP
vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
}
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
for (i=0; i<n; i++) {
omp_init_lock(&vLock[i]);
}
#endif
/* Initialize RNG stream */
stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);
#ifdef _OPENMP
#pragma omp for
#endif
for (i=0; i<n; i++) {
Srcs[i] = i;
}
/* Random pairwise swaps; omp_test_lock (non-blocking) means some swaps
   may be skipped under contention — an approximate shuffle is fine here. */
#ifdef _OPENMP
#pragma omp for
#endif
for (i=0; i<n; i++) {
j = n*sprng(stream);
if (i != j) {
#ifdef _OPENMP
int l1 = omp_test_lock(&vLock[i]);
if (l1) {
int l2 = omp_test_lock(&vLock[j]);
if (l2) {
#endif
k = Srcs[i];
Srcs[i] = Srcs[j];
Srcs[j] = k;
#ifdef _OPENMP
omp_unset_lock(&vLock[j]);
}
omp_unset_lock(&vLock[i]);
}
#endif
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() -elapsed_time_part;
fprintf(stderr, "Vertex ID permutation time: %lf seconds\n", elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
/* Start timing code from here */
if (tid == 0) {
elapsed_time = get_seconds();
#ifdef VERIFYK4
MAX_NUM_PHASES = 2*sqrt(n);
#else
MAX_NUM_PHASES = 50;
#endif
}
#ifdef _OPENMP
#pragma omp barrier
#endif
/* Initialize predecessor lists */
/* The size of the predecessor list of each vertex is bounded by
   its in-degree. So we first compute the in-degree of every vertex */
if (tid == 0) {
P = (plist *) calloc(n, sizeof(plist));
in_degree = (LONG_T *) calloc(n+1, sizeof(LONG_T));
numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
}
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
for (i=0; i<m; i++) {
v = G->endV[i];
#ifdef _OPENMP
omp_set_lock(&vLock[v]);
#endif
in_degree[v]++;
#ifdef _OPENMP
omp_unset_lock(&vLock[v]);
#endif
}
/* numEdges becomes the exclusive prefix sum of in_degree: the offset of
   each vertex's slice inside pListMem. */
prefix_sums(in_degree, numEdges, pSums, n);
if (tid == 0) {
pListMem = (LONG_T *) malloc(m*sizeof(LONG_T));
}
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
for (i=0; i<n; i++) {
P[i].list = pListMem + numEdges[i];
P[i].degree = in_degree[i];
P[i].count = 0;
}
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() - elapsed_time_part;
fprintf(stderr, "In-degree computation time: %lf seconds\n", elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
/* Allocate shared memory */
if (tid == 0) {
free(in_degree);
free(numEdges);
free(pSums);
S = (VERT_T *) malloc(n*sizeof(VERT_T));
sig = (DOUBLE_T *) malloc(n*sizeof(DOUBLE_T));
d = (LONG_T *) malloc(n*sizeof(LONG_T));
del = (DOUBLE_T *) calloc(n, sizeof(DOUBLE_T));
start = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
end = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
psCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
}
/* local memory for each thread */
myS_size = (2*n)/nthreads;
myS = (LONG_T *) malloc(myS_size*sizeof(LONG_T));
num_traversals = 0;
myCount = 0;
#ifdef _OPENMP
#pragma omp barrier
#endif
#ifdef _OPENMP
#pragma omp for
#endif
for (i=0; i<n; i++) {
d[i] = -1;
}
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() -elapsed_time_part;
fprintf(stderr, "BC initialization time: %lf seconds\n", elapsed_time_part);
elapsed_time_part = get_seconds();
}
#endif
/* Main loop: one Brandes traversal per sampled source vertex. */
for (p=0; p<n; p++) {
i = Srcs[p];
/* Skip isolated vertices; count only real traversals toward numV. */
if (G->numEdges[i+1] - G->numEdges[i] == 0) {
continue;
} else {
num_traversals++;
}
if (num_traversals == numV + 1) {
break;
}
if (tid == 0) {
sig[i] = 1;
d[i] = 0;
S[0] = i;
start[0] = 0;
end[0] = 1;
}
count = 1;
phase_num = 0;
#ifdef _OPENMP
#pragma omp barrier
#endif
/* Forward sweep: level-synchronous BFS, counting shortest paths. */
while (end[phase_num] - start[phase_num] > 0) {
myCount = 0;
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(dynamic)
#endif
for (vert = start[phase_num]; vert < end[phase_num]; vert++) {
v = S[vert];
for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
#ifndef VERIFYK4
/* Filter edges with weights divisible by 8 */
if ((G->weight[j] & 7) != 0) {
#endif
w = G->endV[j];
if (v != w) {
#ifdef _OPENMP
myLock = omp_test_lock(&vLock[w]);
if (myLock) {
#endif
/* w found for the first time? */
if (d[w] == -1) {
if (myS_size == myCount) {
/* Resize myS */
myS_t = (LONG_T *) malloc(2*myS_size*sizeof(VERT_T));
memcpy(myS_t, myS, myS_size*sizeof(VERT_T));
free(myS);
myS = myS_t;
myS_size = 2*myS_size;
}
myS[myCount++] = w;
d[w] = d[v] + 1;
sig[w] = sig[v];
P[w].list[P[w].count++] = v;
} else if (d[w] == d[v] + 1) {
sig[w] += sig[v];
P[w].list[P[w].count++] = v;
}
#ifdef _OPENMP
omp_unset_lock(&vLock[w]);
} else {
/* Lock was busy: another thread owns w right now, so w is
   (being) discovered — only the path-count update remains. */
if ((d[w] == -1) || (d[w] == d[v]+ 1)) {
omp_set_lock(&vLock[w]);
sig[w] += sig[v];
P[w].list[P[w].count++] = v;
omp_unset_lock(&vLock[w]);
}
}
#endif
}
#ifndef VERIFYK4
}
#endif
}
}
/* Merge all local stacks for next iteration */
phase_num++;
psCount[tid+1] = myCount;
#ifdef _OPENMP
#pragma omp barrier
#endif
if (tid == 0) {
start[phase_num] = end[phase_num-1];
psCount[0] = start[phase_num];
/* Serial prefix sum over per-thread counts gives each thread its
   write offset into S. */
for(k=1; k<=nthreads; k++) {
psCount[k] = psCount[k-1] + psCount[k];
}
end[phase_num] = psCount[nthreads];
}
#ifdef _OPENMP
#pragma omp barrier
#endif
for (k = psCount[tid]; k < psCount[tid+1]; k++) {
S[k] = myS[k-psCount[tid]];
}
#ifdef _OPENMP
#pragma omp barrier
#endif
count = end[phase_num];
}
phase_num--;
#ifdef _OPENMP
#pragma omp barrier
#endif
/* Backward sweep: accumulate dependencies in reverse BFS order. */
while (phase_num > 0) {
#ifdef _OPENMP
#pragma omp for
#endif
for (j=start[phase_num]; j<end[phase_num]; j++) {
w = S[j];
for (k = 0; k<P[w].count; k++) {
v = P[w].list[k];
#ifdef _OPENMP
omp_set_lock(&vLock[v]);
#endif
del[v] = del[v] + sig[v]*(1+del[w])/sig[w];
#ifdef _OPENMP
omp_unset_lock(&vLock[v]);
#endif
}
BC[w] += del[w];
}
phase_num--;
#ifdef _OPENMP
#pragma omp barrier
#endif
}
/* Reset only the vertices touched by this traversal (S[0..count)). */
#ifdef _OPENMP
chunkSize = n/nthreads;
#pragma omp for schedule(static, chunkSize)
#endif
for (j=0; j<count; j++) {
w = S[j];
d[w] = -1;
del[w] = 0;
P[w].count = 0;
}
#ifdef _OPENMP
#pragma omp barrier
#endif
}
#ifdef DIAGNOSTIC
if (tid == 0) {
elapsed_time_part = get_seconds() -elapsed_time_part;
fprintf(stderr, "BC computation time: %lf seconds\n", elapsed_time_part);
}
#endif
#ifdef _OPENMP
#pragma omp for
for (i=0; i<n; i++) {
omp_destroy_lock(&vLock[i]);
}
#endif
free(myS);
if (tid == 0) {
free(S);
free(pListMem);
free(P);
free(sig);
free(d);
free(del);
#ifdef _OPENMP
free(vLock);
#endif
free(start);
free(end);
free(psCount);
elapsed_time = get_seconds() - elapsed_time;
free(Srcs);
}
free_sprng(stream);
#ifdef _OPENMP
}
#endif
/* Verification */
#ifdef VERIFYK4
/* For the synthetic K4 graph every vertex has the same closed-form BC
   value; compare against it. */
double BCval;
if (SCALE % 2 == 0) {
BCval = 0.5*pow(2, 3*SCALE/2)-pow(2, SCALE)+1.0;
} else {
BCval = 0.75*pow(2, (3*SCALE-1)/2)-pow(2, SCALE)+1.0;
}
int failed = 0;
for (int i=0; i<G->n; i++) {
if (round(BC[i] - BCval) != 0) {
failed = 1;
break;
}
}
if (failed) {
fprintf(stderr, "Kernel 4 failed validation!\n");
} else {
fprintf(stderr, "Kernel 4 validation successful!\n");
}
#endif
return elapsed_time;
}
GB_unop__isnan_bool_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__isnan_bool_fc64
// op(A') function:  GB_unop_tran__isnan_bool_fc64

// C type:   bool
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = (aij)
// unaryop:  cij = GB_cisnan (aij)

// entry type of the input matrix A
#define GB_ATYPE \
    GxB_FC64_t

// entry type of the output matrix C
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: z = isnan(x) for a double-complex x
#define GB_OP(z, x) \
    z = GB_cisnan (x) ;

// casting (identity here: the NaN test is applied to the uncast value)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = (aij) ;

// cij = op (aij) -- fused get/cast/apply, used by GB_unop_transpose.c below
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = (aij) ;          \
    Cx [pC] = GB_cisnan (z) ;       \
}

// true if operator is the identity op with no typecasting
// (0 here: bool output from complex input, so the memcpy fast path is dead)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_cisnan (aij) elementwise over anz entries, in parallel.
// Returns GrB_NO_VALUE if this kernel is compile-time disabled, else
// GrB_SUCCESS.

GrB_Info GB_unop_apply__isnan_bool_fc64
(
    bool *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense / sparse case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisnan (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // positions with Ab [p] == 0 hold no entry and are skipped
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unop_transpose.c, which is textually included
// and driven by the GB_CAST_OP macro defined above.

GrB_Info GB_unop_tran__isnan_bool_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // per-workspace scratch arrays
    const int64_t *GB_RESTRICT A_slice, // partition of A across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sizeof.c
// Liao, 11/17/2009 // Test SgSizeOfOp::replace_expression() // Distilled from spec_omp2001/benchspec/OMPM2001/332.ammp_m/atoms.c int atom() { int serial; #pragma omp parallel { int i =sizeof(serial); } return 1; }