source | c |
|---|---|
newtonforwardinterpolation.c | #include<stdio.h>
#define MAXN 100
#define ORDER 4
int main()
{
float ax[MAXN+1], ay[MAXN+1], diff[MAXN+1][ORDER+1], nr[ORDER+1], dr[ORDER+1], x, p, h, yp;
int n,i,j,k;
printf("\nEnter the value of n:\n");
scanf("%d",&n);
printf("\nEnter the values in form x,y:\n");
for (i=0;i<=n;i++)
scanf("%f %f",&ax[i],&ay[i]);
printf("\nEnter the value of x for which the value of y is wanted: \n");
scanf("%f",&x);
h=ax[1]-ax[0];
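//forward interpolation assumes equally spaced x values; h is the common step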
//now making the difference table
//calculating the 1st order of differences
#pragma omp parallel for private(i) shared(diff)
for (i=0;i<=n-1;i++)
diff[i][1] = ay[i+1]-ay[i];
//now calculating the second and higher order differences
for (j=2;j<=ORDER;j++)
//order j depends on all of order j-1, so only the inner loop can run in parallel
#pragma omp parallel for private(i) shared(diff)
for(i=0;i<=n-j;i++)
diff[i][j] = diff[i+1][j-1] - diff[i][j-1];
//now finding x0
i=0;
while (!(ax[i]>x))
i++;
//now ax[i] is x0 and ay[i] is y0
i--;
p = (x-ax[i])/h;
yp = ay[i];
//now carrying out interpolation
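//this evaluates Newton's forward-difference formula:
//y(x) ~ y0 + p*d1 + p(p-1)/2!*d2 + ... + p(p-1)...(p-k+1)/k!*dk
//where dk = diff[i][k] is the k-th forward difference at x0 and p = (x-x0)/h;
//nr[k] holds the falling factorial p(p-1)...(p-k+1) and dr[k] holds k!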
nr[0] = 1.0;
dr[0] = 1.0;
for (k=1;k<=ORDER;k++)
{
nr[k]=nr[k-1]*(p-k+1);
dr[k]=dr[k-1]*k;
}
#pragma omp parallel for private(k) reduction(+:yp)
for (k=1;k<=ORDER;k++)
{
yp +=(nr[k]/dr[k])*diff[i][k];
}
printf("\nWhen x = %6.1f, corresponding y = %6.2f\n",x,yp);
return 0;
} |
camp.c | /* (c) 1996,1997 Peter Sanders, Ingo Boesnach */
/* simulate a cellular automaton (OpenMP version)
* periodic boundaries
*
* #1: Number of lines
* #2: Number of iterations to be simulated
* #3: Number of threads
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <omp.h>
#include "random.h"
#include "md5tool.h"
/* horizontal size of the configuration */
#define XSIZE 1024
/* "ADT" State and line of states (plus border) */
typedef char State;
typedef State Line[XSIZE + 2];
/* determine random integer between 0 and n-1 */
#define randInt(n) ((int)(nextRandomLEcuyer() * n))
/* --------------------- CA simulation -------------------------------- */
int n_threads = 1;
/* random starting configuration */
static void initConfig(Line *buf, int lines){
int x, y;
initRandomLEcuyer(424243);
for (y = 1; y <= lines; y++) {
for (x = 1; x <= XSIZE; x++) {
buf[y][x] = randInt(100) >= 50;
}
}
}
/* annealing rule from ChoDro96 page 34
* the table is used to map the number of nonzero
* states in the neighborhood to the new state
*/
static State anneal[10] = {0, 0, 0, 0, 1, 0, 1, 1, 1, 1};
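/* example: a neighborhood sum of 4 or 6..9 yields state 1, a sum of 0..3 or 5
yields state 0 (a majority rule with the entries for sums 4 and 5 swapped) */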
/* a: pointer to array; x,y: coordinates; result: n-th element of anneal,
where n is the number of neighbors */
#define transition(a, x, y) \
(anneal[(a)[(y)-1][(x)-1] + (a)[(y)][(x)-1] + (a)[(y)+1][(x)-1] +\
(a)[(y)-1][(x) ] + (a)[(y)][(x) ] + (a)[(y)+1][(x) ] +\
(a)[(y)-1][(x)+1] + (a)[(y)][(x)+1] + (a)[(y)+1][(x)+1]])
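/* the sum includes the cell itself, so it ranges over 0..9 (nine 0/1 terms) */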
/* treat torus like boundary conditions */
static void boundary(Line *buf, int lines){
int x,y;
for (y = 0; y <= lines+1; y++) {
/* copy rightmost column to the buffer column 0 */
buf[y][0 ] = buf[y][XSIZE];
/* copy leftmost column to the buffer column XSIZE + 1 */
buf[y][XSIZE+1] = buf[y][1 ];
}
for (x = 0; x <= XSIZE+1; x++) {
/* copy bottommost row to buffer row 0 */
buf[0][x ] = buf[lines][x];
/* copy topmost row to buffer row lines + 1 */
buf[lines+1][x] = buf[1][x ];
}
}
/* make one simulation iteration with lines lines.
* old configuration is in from, new one is written to to.
*/
static void simulate(Line *from, Line *to, int lines){
int x,y;
boundary(from, lines);
#pragma omp parallel for num_threads(n_threads) shared(to) private(x,y)
for (y = 1; y <= lines; y++) {
for (x = 1; x <= XSIZE; x++) {
to[y][x ] = transition(from, x , y);
}
}
}
/* --------------------- measurement ---------------------------------- */
int main(int argc, char** argv){
int lines, its;
int i;
Line *from, *to, *temp;
char* hash;
assert(argc == 4);
lines = atoi(argv[1]);
its = atoi(argv[2]);
n_threads = atoi(argv[3]);
printf("%d\n",n_threads);
from = malloc((lines + 2) * sizeof(Line));
to = malloc((lines + 2) * sizeof(Line));
initConfig(from, lines);
for (i = 0; i < its; i++) {
simulate(from, to, lines);
temp = from;
from = to;
to = temp;
}
hash = getMD5DigestStr(from[1], sizeof(Line) * (lines));
printf("hash: %s\n", hash);
free(from);
free(to);
free(hash);
return EXIT_SUCCESS;
}
|
cartesiangrid.h | #ifndef GRIDGENERATOR_CARTESIANGRID_H
#define GRIDGENERATOR_CARTESIANGRID_H
#include <gcem.hpp>
#include <common/surface.h>
#include <set>
#include <sfcmm_common.h>
#include "cartesiangrid_base.h"
#include "common/configuration.h"
#include "common/IO.h"
#include "geometry.h"
#include "globaltimers.h"
#ifdef SOLVER_AVAILABLE
#include "gridgenerator/cartesiangrid_generation.h"
#include "lbm/constants.h"
#else
#include "cartesiangrid_generation.h"
#endif
#include "interface/grid_interface.h"
template <Debug_Level DEBUG_LEVEL, GInt NDIM>
class CartesianGrid : public BaseCartesianGrid<DEBUG_LEVEL, NDIM>, private Configuration {
public:
/// Underlying enum type for property access
using Cell = CellProperties;
/// Underlying bitset type for property storage
using PropertyBitsetType = grid::cell::BitsetType;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::checkBounds;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::property;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::size;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::noCells;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::empty;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::lengthOnLvl;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::hasParent;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::parent;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::capacity;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::level;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::globalId;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::center;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::checkDir;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::currentHighestLvl;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::partitionLvl;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::setMaxLvl;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::boundingBox;
using BaseCartesianGrid<DEBUG_LEVEL, NDIM>::setBoundingBox;
CartesianGrid() = default;
~CartesianGrid() override = default;
CartesianGrid(const CartesianGrid&) = delete;
CartesianGrid(CartesianGrid&&) = delete;
auto operator=(const CartesianGrid&) -> CartesianGrid& = delete;
auto operator=(CartesianGrid&&) -> CartesianGrid& = delete;
inline auto child(const GInt id, const GInt pos) -> GInt& {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkChildPos(pos);
checkBounds(id);
return m_childIds.at(id * cartesian::maxNoChildren<NDIM>() + pos);
}
// no bound checking
return m_childIds[id * cartesian::maxNoChildren<NDIM>() + pos];
}
[[nodiscard]] inline auto child(const GInt id, const GInt pos) const -> GInt {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkChildPos(pos);
checkBounds(id);
return m_childIds.at(id * cartesian::maxNoChildren<NDIM>() + pos);
}
// no bound checking
return m_childIds[id * cartesian::maxNoChildren<NDIM>() + pos];
}
[[nodiscard]] inline auto hasChild(const GInt id, const GInt pos) const -> GBool {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkChildPos(pos);
checkBounds(id);
return m_childIds.at(id * cartesian::maxNoChildren<NDIM>() + pos) > -1;
}
// no bound checking
return m_childIds[id * cartesian::maxNoChildren<NDIM>() + pos] > -1;
}
[[nodiscard]] inline auto hasChildren(const GInt id) const -> GBool {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
}
return noChildren(id) > 0;
}
[[nodiscard]] inline auto noChildren(const GInt id) const -> GInt {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
}
return std::count_if(&m_childIds[id * cartesian::maxNoChildren<NDIM>() + 0],
&m_childIds[id * cartesian::maxNoChildren<NDIM>() + cartesian::maxNoChildren<NDIM>()],
[](const GInt childId) { return childId > -1; });
}
// Neighbors
inline auto neighbor() const -> const auto& { return m_nghbrIds; }
// todo: make diagonal neighbors a template parameter
[[nodiscard]] inline auto neighbor(const GInt id, const GInt dir) const -> GInt override {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
// checkDir(dir);
if(m_diagonalNghbrs) {
return m_nghbrIds.at(id * cartesian::maxNoNghbrsDiag<NDIM>() + dir);
}
return m_nghbrIds.at(id * cartesian::maxNoNghbrs<NDIM>() + dir);
}
if(m_diagonalNghbrs) {
return m_nghbrIds[id * cartesian::maxNoNghbrsDiag<NDIM>() + dir];
}
return m_nghbrIds[id * cartesian::maxNoNghbrs<NDIM>() + dir];
}
inline auto neighbor(const GInt id, const GInt dir) -> GInt& {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
// checkDir(dir);
if(m_diagonalNghbrs) {
return m_nghbrIds.at(id * cartesian::maxNoNghbrsDiag<NDIM>() + dir);
}
return m_nghbrIds.at(id * cartesian::maxNoNghbrs<NDIM>() + dir);
}
if(m_diagonalNghbrs) {
return m_nghbrIds[id * cartesian::maxNoNghbrsDiag<NDIM>() + dir];
}
return m_nghbrIds[id * cartesian::maxNoNghbrs<NDIM>() + dir];
}
[[nodiscard]] inline auto hasNeighbor(const GInt id, const GInt dir) const -> GBool {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
checkDir(dir);
if(m_diagonalNghbrs) {
return m_nghbrIds.at(id * cartesian::maxNoNghbrsDiag<NDIM>() + dir) != INVALID_CELLID;
}
return m_nghbrIds.at(id * cartesian::maxNoNghbrs<NDIM>() + dir) != INVALID_CELLID;
}
if(m_diagonalNghbrs) {
return m_nghbrIds[id * cartesian::maxNoNghbrsDiag<NDIM>() + dir] != INVALID_CELLID;
}
return m_nghbrIds[id * cartesian::maxNoNghbrs<NDIM>() + dir] != INVALID_CELLID;
}
[[nodiscard]] inline auto hasAnyNeighbor(const GInt id, const GInt dir) const -> GBool {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
checkDir(dir);
}
return hasNeighbor(id, dir) || (hasParent(id) && hasNeighbor(parent(id), dir));
}
// Other data fields
inline auto weight(const GInt id) -> GFloat& {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
return m_weight.at(id);
}
return m_weight[id];
}
[[nodiscard]] inline auto weight(const GInt id) const -> GFloat {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
return m_weight.at(id);
}
return m_weight[id];
}
// Other data fields (subject to change)
inline auto noOffsprings(const GInt id) -> GInt& {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
return m_noOffsprings.at(id);
}
return m_noOffsprings[id];
}
[[nodiscard]] inline auto noOffsprings(const GInt id) const -> GInt {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
return m_noOffsprings.at(id);
}
return m_noOffsprings[id];
}
inline auto workload(const GInt id) -> GFloat& {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
return m_workload.at(id);
}
return m_workload[id];
}
[[nodiscard]] inline auto workload(const GInt id) const -> GFloat {
if(DEBUG_LEVEL >= Debug_Level::debug) {
checkBounds(id);
return m_workload.at(id);
}
return m_workload[id];
}
/// Return number of properties defined for each node
static constexpr auto noProperties() -> GInt { return grid::cell::p(Cell::NumProperties); }
[[nodiscard]] inline auto noLeafCells() const -> GInt { return m_noLeafCells; }
[[nodiscard]] inline auto noBndCells() const -> GInt { return m_noBndCells; }
void setCapacity(const GInt _capacity) override {
if(!empty()) {
TERMM(-1, "Invalid operation tree already allocated.");
}
m_childIds.resize(_capacity * cartesian::maxNoChildren<NDIM>());
m_nghbrIds.resize(_capacity * cartesian::maxNoNghbrsDiag<NDIM>());
m_weight.resize(_capacity);
m_noOffsprings.resize(_capacity);
m_workload.resize(_capacity);
BaseCartesianGrid<DEBUG_LEVEL, NDIM>::setCapacity(_capacity);
reset();
}
void reset() override {
std::fill(m_childIds.begin(), m_childIds.end(), INVALID_CELLID);
std::fill(m_nghbrIds.begin(), m_nghbrIds.end(), INVALID_CELLID);
std::fill(m_weight.begin(), m_weight.end(), NAN);
std::fill(m_noOffsprings.begin(), m_noOffsprings.end(), INVALID_CELLID);
std::fill(m_workload.begin(), m_workload.end(), NAN);
for(GInt i = 0; i < capacity(); ++i) {
property(i).reset();
center(i).fill(NAN);
}
BaseCartesianGrid<DEBUG_LEVEL, NDIM>::reset();
}
void save(const GString& /*fileName*/, const json& /*gridOutConfig*/) const override { TERMM(-1, "Not implemented!"); }
auto bndrySurface(const GString& id) -> Surface<DEBUG_LEVEL, NDIM>& {
if(DEBUG_LEVEL >= Debug_Level::debug) {
if(m_bndrySurfaces.count(id) == 0) {
TERMM(-1, "Invalid bndryId \"" + id + "\"");
}
}
return m_bndrySurfaces.at(id);
}
/// Load the generated grid in-memory and set additional properties
/// \param grid Generated grid.
void loadGridInplace(const CartesianGridGen<DEBUG_LEVEL, NDIM>& grid, const json& properties) {
setConfiguration(properties);
// grid.balance(); //todo: implement
setCapacity(grid.capacity()); // todo: change for adaptation
m_geometry = grid.geometry();
size() = grid.size();
currentHighestLvl() = grid.currentHighestLvl();
partitionLvl() = grid.partitionLvl();
setMaxLvl(grid.maxLvl());
setBoundingBox(grid.boundingBox());
#ifdef _OPENMP
#pragma omp parallel default(none) shared(grid)
{
#endif
#ifdef _OPENMP
#pragma omp for
#endif
for(GInt cellId = 0; cellId < noCells(); ++cellId) {
globalId(cellId) = grid.globalId(cellId);
parent(cellId) = grid.parent(cellId);
for(GInt childId = 0; childId < cartesian::maxNoChildren<NDIM>(); ++childId) {
child(cellId, childId) = grid.child(cellId, childId);
}
for(GInt nghbrId = 0; nghbrId < cartesian::maxNoNghbrs<NDIM>(); ++nghbrId) {
neighbor(cellId, nghbrId) = grid.neighbor(cellId, nghbrId);
}
level(cellId) = grid.level(cellId);
for(GInt dir = 0; dir < NDIM; ++dir) {
center(cellId, dir) = grid.center(cellId, dir);
}
}
#ifdef _OPENMP
}
#endif
m_axisAlignedBnd = opt_config_value<GBool>("assumeAxisAligned", m_axisAlignedBnd);
if(!m_axisAlignedBnd) {
TERMM(-1, "Not implemented!");
}
m_periodic = has_any_key_value("type", "periodic");
setProperties();
determineBoundaryCells();
identifyBndrySurfaces();
setupPeriodicConnections();
// addGhostCells();
addDiagonalNghbrs();
for(auto& [name, srf] : m_bndrySurfaces) {
srf.updateNeighbors();
}
// if(m_loadBalancing) {
// setWorkload();
// calculateOffspringsAndWeights();
// }
}
/// Add ghost cells
void addGhostCells() {
// todo: make settable
const GBool addGhostLayers = false;
if(addGhostLayers) {
// check all surfaces and add ghostcells in all missing dist directions
for(const auto& [srfName, srf] : m_bndrySurfaces) {
for(GInt cellId : srf.getCellList()) {
for(GInt nghbrDir = 0; nghbrDir < cartesian::maxNoNghbrs<NDIM>(); ++nghbrDir) {
if(neighbor(cellId, nghbrDir) == INVALID_CELLID) {
const GInt ghostCellId = size() + m_noGhostsCells;
cerr0 << "adding cell " << ghostCellId << " as neighbor to " << cellId << std::endl; // todo: remove
neighbor(cellId, nghbrDir) = ghostCellId;
neighbor(ghostCellId, cartesian::oppositeDir(nghbrDir)) = cellId;
property(ghostCellId, CellProperties::ghost);
++m_noGhostsCells;
}
}
}
}
}
}
/// Add the diagonal (2D/3D) and/or tridiagonal (3D) neighbors to the neighbor connections of each cell.
void addDiagonalNghbrs() {
m_diagonalNghbrs = true;
auto tmpNghbr = m_nghbrIds;
auto tmpN = [&](const GInt cellId, const GInt dir) { return tmpNghbr[cellId * cartesian::maxNoNghbrs<NDIM>() + dir]; };
for(GInt cellId = 0; cellId < size(); ++cellId) {
// dirs 0=-x 1=+x 2=-y 3=+y
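// diagonal dirs are appended after the face dirs (2D): 4=+x+y 5=+x-y 6=-x-y 7=-x+y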
// copy existing neighbor connections
for(GInt dir = 0; dir < cartesian::maxNoNghbrs<NDIM>(); ++dir) {
neighbor(cellId, dir) = tmpN(cellId, dir);
}
// add diagonal nghbrs
if(NDIM > 1) {
const GInt nghbrmX = tmpN(cellId, 0);
const GInt nghbrpX = tmpN(cellId, 1);
// +x+y
const GInt nghbrpXpY = nghbrpX != INVALID_CELLID ? tmpN(nghbrpX, 3) : -1;
neighbor(cellId, cartesian::maxNoNghbrs<NDIM>()) = nghbrpXpY;
// +x-y
const GInt nghbrpXmY = nghbrpX != INVALID_CELLID ? tmpN(nghbrpX, 2) : -1;
neighbor(cellId, cartesian::maxNoNghbrs<NDIM>() + 1) = nghbrpXmY;
// -x-y
const GInt nghbrmXmY = nghbrmX != INVALID_CELLID ? tmpN(nghbrmX, 2) : -1;
neighbor(cellId, cartesian::maxNoNghbrs<NDIM>() + 2) = nghbrmXmY;
// -x+y
const GInt nghbrmXpY = nghbrmX != INVALID_CELLID ? tmpN(nghbrmX, 3) : -1;
neighbor(cellId, cartesian::maxNoNghbrs<NDIM>() + 3) = nghbrmXpY;
}
// add tridiagonal nghbrs
if(NDIM > 2) {
TERMM(-1, "Not implemented!");
}
}
}
auto getCartesianGridData() const -> CartesianGridData<NDIM> { return CartesianGridData<NDIM>(*this); }
auto totalSize() const -> GInt { return size() + m_noGhostsCells; }
private:
void setProperties() {
for(GInt cellId = 0; cellId < noCells(); ++cellId) {
const GBool isLeaf = noChildren(cellId) == 0;
property(cellId, CellProperties::leaf) = isLeaf;
m_noLeafCells += static_cast<GInt>(isLeaf);
}
};
void determineBoundaryCells() {
for(GInt cellId = 0; cellId < noCells(); ++cellId) {
// a cell can only be a boundary cell if it has no parent (partition cell) or
// its parent has a cut with the boundary -> possible cut of the child!
if(parent(cellId) == -1 || property(parent(cellId), CellProperties::bndry)) {
const GDouble cellLength = lengthOnLvl(std::to_integer<GInt>(level(cellId)));
property(cellId, CellProperties::bndry) = m_geometry->cutWithCell(center(cellId), cellLength);
// if(DEBUG_LEVEL > Debug_Level::min_debug && property(cellId, CellProperties::bndry)){
if(property(cellId, CellProperties::bndry)) {
GInt noNeighbors = 0;
for(GInt nghbrId = 0; nghbrId < cartesian::maxNoNghbrs<NDIM>(); ++nghbrId) {
if(neighbor(cellId, nghbrId) != INVALID_CELLID) {
++noNeighbors;
}
}
if(cartesian::maxNoNghbrs<NDIM>() == noNeighbors) {
// cerr0 << "Removed boundary property cellId: " << cellId << " (" << center(cellId)[0] << ", " << center(cellId)[1]
// << ") L:" << cellLength << std::endl;
property(cellId, CellProperties::bndry) = false;
logger << "Simplified bndry process!!!" << std::endl;
// TERMM(-1, "Cell marked as boundary, but is not on a boundary!");
}
}
}
m_noBndCells += static_cast<GInt>(property(cellId, CellProperties::bndry) && property(cellId, CellProperties::leaf));
}
}
#ifdef CLANG_COMPILER
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunknown-pragmas"
#pragma ide diagnostic ignored "cppcoreguidelines-pro-bounds-constant-array-index"
#endif
void identifyBndrySurfaces() {
if(m_axisAlignedBnd) {
for(GInt surfId = 0; surfId < cartesian::maxNoNghbrs<NDIM>(); ++surfId) {
m_bndrySurfaces.insert({static_cast<GString>(DirIdString[surfId]), Surface<DEBUG_LEVEL, NDIM>(this->getCartesianGridData())});
}
for(GInt cellId = 0; cellId < size(); ++cellId) {
if(property(cellId, Cell::bndry)) {
for(GInt dir = 0; dir < cartesian::maxNoNghbrs<NDIM>(); ++dir) {
if(!hasNeighbor(cellId, dir)) {
m_bndrySurfaces.at(static_cast<GString>(DirIdString[dir])).addCell(cellId, dir);
}
}
}
}
} else {
TERMM(-1, "Not implemented");
}
}
#ifdef CLANG_COMPILER
#pragma clang diagnostic pop
#endif
void setupPeriodicConnections() {
if(m_periodic) {
std::vector<json> periodicBnds = get_all_items_with_value("periodic");
std::unordered_map<GString, GString> periodicConnections;
for(const auto& bnd : periodicBnds) {
for(const auto& [surfName, props] : bnd.items()) {
// check if periodic boundary should be handled as a boundary condition
const auto generateBndry = config::opt_config_value<GBool>(props, "generateBndry", true);
// skip if it should be
if(!generateBndry) {
periodicConnections.emplace(surfName, config::required_config_value<GString>(props, "connection"));
}
}
}
if(!periodicConnections.empty()) {
logger << "Setting up periodic connections!" << std::endl;
for(const auto& connection : periodicConnections) {
if(periodicConnections.count(connection.second) != 0) {
periodicConnections.erase(connection.second);
addPeriodicConnection(bndrySurface(connection.first), bndrySurface(connection.second));
}
}
}
}
}
// todo: fix for refinement level changes
// todo: simplify
void addPeriodicConnection(const Surface<DEBUG_LEVEL, NDIM>& surfA, const Surface<DEBUG_LEVEL, NDIM>& surfB) {
// connect cells of surfA and surfB
for(const GInt cellIdA : surfA.getCellList()) {
for(const GInt cellIdB : surfB.getCellList()) {
GInt notMatchingDir = -1;
const auto& centerA = center(cellIdA);
const auto& centerB = center(cellIdB);
// find cells of surfA and surfB to connect
for(GInt dir = 0; dir < NDIM; ++dir) {
// connect cells whose centers differ in exactly one direction
if(std::abs(centerA[dir] - centerB[dir]) > GDoubleEps) {
if(notMatchingDir >= 0) {
// periodic connection not possible since two directions don't match (3D)
notMatchingDir = -1;
break;
}
notMatchingDir = dir;
continue;
}
}
// cells need to be connected
if(notMatchingDir >= 0) {
// identify periodic direction for each cell
const GInt nghbrDir = 2 * notMatchingDir;
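// face dirs come in pairs: 2*d is the negative side and 2*d+1 the positive side of direction d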
if(centerA[notMatchingDir] > centerB[notMatchingDir]) {
if constexpr(DEBUG_LEVEL >= Debug_Level::debug) {
if(neighbor(cellIdB, nghbrDir) != INVALID_CELLID) {
TERMM(-1, "Invalid set periodic connection! cellIdB:" + std::to_string(cellIdB) + " dir:" + std::to_string(nghbrDir));
}
if(neighbor(cellIdA, nghbrDir + 1) != INVALID_CELLID) {
TERMM(-1, "Invalid set periodic connection! cellIdA:" + std::to_string(cellIdA) + " dir:" + std::to_string(nghbrDir + 1));
}
}
neighbor(cellIdB, nghbrDir) = cellIdA;
neighbor(cellIdA, nghbrDir + 1) = cellIdB;
} else {
if constexpr(DEBUG_LEVEL >= Debug_Level::debug) {
if(neighbor(cellIdA, nghbrDir) != INVALID_CELLID) {
TERMM(-1, "Invalid set periodic connection! cellIdA:" + std::to_string(cellIdA) + " dir:" + std::to_string(nghbrDir));
}
if(neighbor(cellIdB, nghbrDir + 1) != INVALID_CELLID) {
TERMM(-1, "Invalid set periodic connection! cellIdB:" + std::to_string(cellIdB) + " dir:" + std::to_string(nghbrDir + 1));
}
}
neighbor(cellIdA, nghbrDir) = cellIdB;
neighbor(cellIdB, nghbrDir + 1) = cellIdA;
}
}
}
}
}
void setWorkload() { TERMM(-1, "Not implemented!"); };
void calculateOffspringsAndWeights() { TERMM(-1, "Not implemented!"); };
void invalidate(const GInt begin, const GInt end) {
std::fill(&parent(begin), &parent(end), INVALID_CELLID);
std::fill(m_childIds.begin() + begin * cartesian::maxNoChildren<NDIM>(),
m_childIds.begin() + end * cartesian::maxNoChildren<NDIM>(),
INVALID_CELLID);
std::fill(m_nghbrIds.begin() + begin * cartesian::maxNoNghbrsDiag<NDIM>(),
m_nghbrIds.begin() + end * cartesian::maxNoNghbrsDiag<NDIM>(),
INVALID_CELLID);
std::fill(&globalId(begin), &globalId(end), INVALID_CELLID);
std::fill(&level(begin), &level(end), std::byte(-1));
std::fill(¢er(begin), center(end), NAN);
std::fill(m_weight.begin() + begin, m_weight.begin() + end, NAN);
std::fill(m_noOffsprings.begin() + begin, m_noOffsprings.begin() + end, INVALID_CELLID);
std::fill(m_workload.begin() + begin, m_workload.begin() + end, NAN);
for(GInt i = 0; i < capacity(); ++i) {
property(i).reset();
}
}
// template <class Functor, class T>
// void rawCopyGeneric(Functor&& c, const T& source, const GInt begin, const GInt end, const GInt destination);
void deleteConnectivity(const GInt begin, const GInt end) {
for(GInt i = begin; i < end; i++) {
// Parent
if(hasParent(i)) {
const GInt p = parent(i);
for(GInt j = 0; j < cartesian::maxNoChildren<NDIM>(); j++) {
if(child(p, j) == i) {
child(p, j) = -1;
}
}
}
// Children
for(GInt j = 0; j < cartesian::maxNoChildren<NDIM>(); j++) {
if(hasChild(i, j)) {
parent(child(i, j)) = -1;
}
}
// Neighbors
for(GInt j = 0; j < cartesian::maxNoNghbrs<NDIM>(); j++) {
if(hasNeighbor(i, j)) {
neighbor(neighbor(i, j), cartesian::oppositeDir(j)) = -1;
}
}
}
}
void moveConnectivity(const GInt begin, const GInt end, const GInt to) {
// Auxiliary method for checking if a given id is within the original range that was moved
auto inMovedRange = [begin, end](const GInt id) { return (id >= begin && id < end); };
// General strategy:
// 1) Loop over moved nodes and check all tree connections (parents/children/neighbors)
// 2) If a given connection is to a node that was moved: apply offset to current node
// 3) If a given connection is to a node that was not moved: change connectivity in other node
for(GInt from = begin; from < end; ++from) {
const GInt distance = to - begin;
const GInt destination = from + distance;
// Parent
if(hasParent(destination)) {
const GInt p = parent(destination);
if(inMovedRange(p)) {
parent(destination) += distance;
} else {
for(GInt j = 0; j < cartesian::maxNoChildren<NDIM>(); ++j) {
if(child(p, j) == from) {
child(p, j) = destination;
}
}
}
}
// Children
for(GInt j = 0; j < cartesian::maxNoChildren<NDIM>(); ++j) {
if(hasChild(destination, j)) {
const GInt c = child(destination, j);
if(inMovedRange(c)) {
child(destination, j) += distance;
} else {
parent(c) = destination;
}
}
}
// Neighbors
for(GInt j = 0; j < cartesian::maxNoNghbrs<NDIM>(); ++j) {
if(hasNeighbor(destination, j)) {
const GInt n = neighbor(destination, j);
if(inMovedRange(n)) {
neighbor(destination, j) += distance;
} else {
neighbor(n, cartesian::oppositeDir(j)) = destination;
}
}
}
}
}
void checkChildPos(const GInt pos) const {
if(pos >= cartesian::maxNoChildren<NDIM>() || pos < 0) {
TERMM(-1, "Invalid child position");
}
}
// cartesian::Tree<DEBUG_LEVEL, NDIM> m_tree{};
std::shared_ptr<GeometryManager<DEBUG_LEVEL, NDIM>> m_geometry;
GInt m_noLeafCells = 0;
GInt m_noBndCells = 0;
GInt m_noGhostsCells = 0;
GBool m_loadBalancing = false;
GBool m_diagonalNghbrs = false;
GBool m_axisAlignedBnd = false;
GBool m_periodic = false;
std::unordered_map<GString, Surface<DEBUG_LEVEL, NDIM>> m_bndrySurfaces;
// Data containers
std::vector<GInt> m_childIds{};
std::vector<GInt> m_nghbrIds{};
std::vector<GInt> m_noOffsprings{};
std::vector<GFloat> m_weight{};
std::vector<GFloat> m_workload{};
};
#endif // GRIDGENERATOR_CARTESIANGRID_H
|
LAGraph_matrix_extract_keep_dimensions.c | //------------------------------------------------------------------------------
// LAGraph_matrix_extract_keep_dimensions: extract submatrix but keep the
// dimensions of the original matrix
// ------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
//------------------------------------------------------------------------------
// LAGraph_Matrix_extract_keep_dimensions: Contributed by Gabor Szarnyas.
// Budapest University of Technology and Economics
// (with accented characters: G\'{a}bor Sz\'{a}rnyas).
// Compute the submatrix of A induced by the given vertex set, keeping the dimensions of A.
#include <LAGraph.h>
#include <LAGraphX.h>
#define LAGraph_FREE_ALL \
{ \
GrB_free(&C) ; \
GrB_free(&type) ; \
}
//****************************************************************************
typedef struct
{
const GrB_Index nv; // number of vertices
const bool* Vdense; // array denoting whether a vertex should be kept
} Vdense_struct_type;
bool select_submatrix_elements_fun(
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
const GrB_Index i, const GrB_Index j,
#else
const int64_t i, const int64_t j,
#endif
const void *x, const void *thunk)
{
Vdense_struct_type* indices = (Vdense_struct_type*) (thunk);
return indices->Vdense[i] && indices->Vdense[j];
}
//------------------------------------------------------------------------------
GrB_Info LAGraph_Matrix_extract_keep_dimensions // extract submatrix but keep
// the dimensions of the
// original matrix
(
GrB_Matrix *Chandle, // output matrix
const GrB_Matrix A, // input matrix
const GrB_Index *Vsparse, // sorted list of vertex indices
const bool *Vdense, // boolean array of vertices
GrB_Index nv // number of vertex indices
)
{
#if !defined(LG_SUITESPARSE)
return GrB_PANIC;
#else
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Type type;
GrB_Index n ;
GrB_Matrix C = NULL ;
LAGRAPH_OK (GxB_Matrix_type(&type, A));
LAGRAPH_OK (GrB_Matrix_nrows (&n, A));
LAGRAPH_OK (GrB_Matrix_new (&C, type, n, n));
if (Vsparse == NULL && Vdense == NULL)
{
LAGRAPH_ERROR("Both Vsparse and Vdense are set to NULL", GrB_NULL_POINTER)
}
if (Vsparse == NULL) // use Vdense and GxB_select
{
Vdense_struct_type vdense_struct = {.nv = nv, .Vdense = Vdense};
GrB_Type Vdense_type;
LAGRAPH_OK (GrB_Type_new(&Vdense_type, sizeof(vdense_struct)));
GxB_Scalar vdense_thunk;
LAGRAPH_OK (GxB_Scalar_new(&vdense_thunk, Vdense_type));
LAGRAPH_OK (GxB_Scalar_setElement(vdense_thunk, (void*) &vdense_struct));
GxB_SelectOp select_submatrix_elements_op;
LAGRAPH_OK (GxB_SelectOp_new(
&select_submatrix_elements_op,
select_submatrix_elements_fun,
NULL, Vdense_type));
LAGRAPH_OK (GxB_select(C, NULL, NULL, select_submatrix_elements_op,
A, vdense_thunk, NULL));
GrB_free(&select_submatrix_elements_op); select_submatrix_elements_op = NULL;
GrB_free(&vdense_thunk); vdense_thunk = NULL;
GrB_free(&Vdense_type); Vdense_type = NULL;
}
else
{
GrB_Matrix D; // diagonal matrix used to select rows/columns
LAGRAPH_OK (GrB_Matrix_new(&D, GrB_BOOL, n, n));
bool* X = LAGraph_Malloc(nv, sizeof(bool)) ;
if (X == NULL)
{
LAGRAPH_ERROR("out of memory", GrB_OUT_OF_MEMORY)
}
int nthreads;
LAGraph_GetNumThreads(&nthreads, NULL) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index i = 0; i < nv; i++)
{
X[i] = true;
}
LAGRAPH_OK (GrB_Matrix_build(D, Vsparse, Vsparse, X, nv, GrB_LOR));
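// D now holds true at (v,v) for every kept vertex v: multiplying by D from the
// left keeps the selected rows, multiplying from the right the selected columns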
GxB_Format_Value A_format;
LAGRAPH_OK(GxB_get(A, GxB_FORMAT, &A_format));
if (A_format == GxB_BY_ROW) // C = (D*A)*D
{
LAGRAPH_OK (GrB_mxm(C, NULL, NULL, GxB_ANY_SECOND_FP64, D, A, NULL));
LAGRAPH_OK (GrB_mxm(C, NULL, NULL, GxB_ANY_FIRST_FP64, C, D, NULL));
}
else // A_format == GxB_BY_COL: C = D*(A*D)
{
LAGRAPH_OK (GrB_mxm(C, NULL, NULL, GxB_ANY_FIRST_FP64, A, D, NULL));
LAGRAPH_OK (GrB_mxm(C, NULL, NULL, GxB_ANY_SECOND_FP64, D, C, NULL));
}
GrB_free(&D); D = NULL;
}
(*Chandle) = C ;
return (GrB_SUCCESS) ;
#endif
}
|
loop1.c | #include <stdio.h>
#include <omp.h>
int main()
{
int i,j;
int innerreps = 100;
#pragma omp parallel private(j)
{
// for (j=0; j<innerreps; j++)
{
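// schedule(static,2): chunks of two consecutive iterations are handed out
// round-robin to the threads of the team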
#pragma omp for schedule(static,2)
for (i=0; i<32; i++)
{
printf ("thread %d is executing %d \n",omp_get_thread_num(),i);
// delay(500);
}
}
}
return 0;
}
|
compatibility.h | // -*- C++ -*-
// Copyright (C) 2007-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/compatibility.h
* @brief Compatibility layer, mostly concerned with atomic operations.
*
* This file is a GNU parallel extension to the Standard C++ Library
* and contains implementation details for the library's internal use.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif
#ifdef __MINGW32__
// Including <windows.h> will drag in all the windows32 names. Since
// that can cause user code portability problems, we just declare the
// one needed function here.
extern "C"
__attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
#endif
namespace __gnu_parallel
{
template<typename _Tp>
inline _Tp
__add_omp(volatile _Tp* __ptr, _Tp __addend)
{
int64_t __res;
#pragma omp critical
{
__res = *__ptr;
*(__ptr) += __addend;
}
return __res;
}
/** @brief Add a value to a variable, atomically.
*
* @param __ptr Pointer to a signed integer.
* @param __addend Value to add.
*/
template<typename _Tp>
inline _Tp
__fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
{
if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
return __add_omp(__ptr, __addend);
}
template<typename _Tp>
inline bool
__cas_omp(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
{
bool __res = false;
#pragma omp critical
{
if (*__ptr == __comparand)
{
*__ptr = __replacement;
__res = true;
}
}
return __res;
}
/** @brief Compare-and-swap
*
* Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* @param __ptr Pointer to signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value.
*/
template<typename _Tp>
inline bool
__compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
{
if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
false, __ATOMIC_ACQ_REL,
__ATOMIC_RELAXED);
return __cas_omp(__ptr, __comparand, __replacement);
}
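// Both __fetch_and_add and __compare_and_swap use the lock-free __atomic
// builtins when the type permits and otherwise fall back to the
// OpenMP-critical implementations above.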
/** @brief Yield control to another thread, without waiting for
* the end of the time slice.
*/
inline void
__yield()
{
#if defined (_WIN32) && !defined (__CYGWIN__)
Sleep(0);
#else
sched_yield();
#endif
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
|
Example_task_dep.11.c | /*
* @@name: task_dep.11.c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_5.0
*/
#include<stdio.h>
void set_an_element(int *p, int val) {
*p = val;
}
void print_all_elements(int *v, int n) {
int i;
for (i = 0; i < n; ++i) {
printf("%d, ", v[i]);
}
printf("\n");
}
void parallel_computation(int n) {
int v[n];
#pragma omp parallel
#pragma omp single
{
int i;
for (i = 0; i < n; ++i)
#pragma omp task depend(out: v[i])
set_an_element(&v[i], i);
#pragma omp task depend(iterator(it = 0:n), in: v[it])
// #pragma omp task depend(in: v[0:n]) would violate the array-section restriction.
print_all_elements(v, n);
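// the iterator clause creates n separate "in" dependences (one per v[it]),
// so this task waits for all n set_an_element tasks above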
}
}
|
batchnorm_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "batchnorm_kernel_arm.h"
#include <arm_neon.h>
static void batchnorm_kernel(int i, int id, void* data, const float* input, float* output, float* scale_mean,
float* scale_var, int channel_size, int num_thread)
{
int step = (( int* )data)[0];
#pragma omp parallel for num_threads(num_thread)
for (int c = 0; c < step; c++)
{
int cur_c = id * step + c;
float s_mean = scale_mean[cur_c];
float s_var = scale_var[cur_c];
float32x4_t _mean = vdupq_n_f32(s_mean);
float32x4_t _var = vdupq_n_f32(s_var);
int offset = cur_c * channel_size;
const float* input_ptr = input + offset;
float* output_ptr = output + offset;
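// vectorized main loop: (channel_size & -4) rounds channel_size down to a
// multiple of 4, processing four floats per NEON iteration; the scalar loop
// below handles the remaining 0..3 elements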
for (int l = 0; l < (channel_size & -4); l += 4)
{
float32x4_t _input = vld1q_f32(input_ptr);
vst1q_f32(output_ptr, vmlaq_f32(_mean, _input, _var));
input_ptr += 4;
output_ptr += 4;
}
for (int l = channel_size & ~3; l < channel_size; l++)
{
*output_ptr = (*input_ptr) * s_var + s_mean;
input_ptr++;
output_ptr++;
}
}
}
int batchnorm_run(struct tensor* output_tensor, struct tensor* input_tensor, float* scale_mean,
float* scale_var_inv, int num_thread)
{
int batch_number = input_tensor->dims[0];
int channel_num = input_tensor->dims[1];
int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
int img_size = channel_num * channel_size;
const float* input = ( const float* )input_tensor->data;
float* output = ( float* )output_tensor->data;
float* scale_mean_t = ( float* )scale_mean;
float* scale_var_inv_t = ( float* )scale_var_inv;
/* only use mean and var */
for (int i = 0; i < batch_number; i++)
{
const float* cur_input = input + i * img_size;
float* cur_output = output + i * img_size;
batchnorm_kernel(0, 0, &channel_num, cur_input, cur_output, scale_mean_t, scale_var_inv_t, channel_size,
num_thread);
}
return 0;
}
|
error.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB LU code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <stdio.h>
#include <math.h>
#include "applu.incl"
//---------------------------------------------------------------------
//
// compute the solution error
//
//---------------------------------------------------------------------
void error()
{
//---------------------------------------------------------------------
// local variables
//---------------------------------------------------------------------
int i, j, k, m;
double tmp;
double u000ijk[5];
double errnm_local[5];
for (m = 0; m < 5; m++) {
errnm[m] = 0.0;
}
#pragma omp parallel default(shared) private(i,j,k,m,tmp,u000ijk,errnm_local)
{
for (m = 0; m < 5; m++) {
errnm_local[m] = 0.0;
}
#pragma omp for nowait
for (k = 1; k < nz-1; k++) {
for (j = jst; j < jend; j++) {
for (i = ist; i < iend; i++) {
exact( i, j, k, u000ijk );
for (m = 0; m < 5; m++) {
tmp = ( u000ijk[m] - u[k][j][i][m] );
errnm_local[m] = errnm_local[m] + tmp * tmp;
}
}
}
}
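// merge each thread's private partial sums into the shared errnm;
// one atomic update per component avoids a race after the nowait loop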
for (m = 0; m < 5; m++) {
#pragma omp atomic
errnm[m] += errnm_local[m];
}
} //end parallel
for (m = 0; m < 5; m++) {
errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
/*
printf(" \n RMS-norm of error in soln. to first pde = %12.5E\n"
" RMS-norm of error in soln. to second pde = %12.5E\n"
" RMS-norm of error in soln. to third pde = %12.5E\n"
" RMS-norm of error in soln. to fourth pde = %12.5E\n"
" RMS-norm of error in soln. to fifth pde = %12.5E\n",
errnm[0], errnm[1], errnm[2], errnm[3], errnm[4]);
*/
}
|
dnnl_requantize-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* \file dnnl_requantize-inl.h
* \brief
* \author Jin Huang, Xinyu Chen
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_REQUANTIZE_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_REQUANTIZE_INL_H_
#if MXNET_USE_ONEDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/dnnl/dnnl_base-inl.h"
#include "../requantize-inl.h"
namespace mxnet {
namespace op {
template <typename DstType>
static void DNNLRequantizeForwardKer(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs,
const float real_range) {
using namespace mshadow;
using namespace mxnet_op;
using red::limits::MaxValue;
using red::limits::MinValue;
typedef int32_t SrcDType;
// check shapes
size_t i_dim = inputs[0].shape().ndim();
size_t o_dim = outputs[0].shape().ndim();
CHECK_EQ(i_dim, o_dim);
float first_quantized_range = MinAbs(MinValue<SrcDType>(), MaxValue<SrcDType>());
float first_real_range = MaxAbs(*inputs[1].data().dptr<float>(), *inputs[2].data().dptr<float>());
float first_scale = first_real_range / first_quantized_range;
float second_real_range = real_range;
float second_quantized_range = 0.f;
if (std::is_same<DstType, int8_t>::value) {
second_quantized_range = MinAbs(MaxValue<DstType>(), MinValue<DstType>());
*outputs[1].data().dptr<float>() = -second_real_range;
*outputs[2].data().dptr<float>() = second_real_range;
} else if (std::is_same<DstType, uint8_t>::value) {
second_quantized_range = MaxValue<DstType>();
*outputs[1].data().dptr<float>() = 0.f;
*outputs[2].data().dptr<float>() = second_real_range;
} else {
LOG(FATAL) << "Unsupported requantize output type";
}
float second_scale = second_quantized_range / second_real_range;
float scale = first_scale * second_scale;
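// multiplying an int32 value by first_scale recovers the real value, and
// multiplying a real value by second_scale maps it into the int8/uint8 range,
// so a single reorder with scale = first_scale * second_scale requantizes directly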
dnnl::primitive_attr attr;
const int mask = 0;
std::vector<float> scales = {scale};
attr.set_output_scales(mask, scales);
dnnl::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
NDArray in_buffer = inputs[0];
if (inputs[0].IsView() && inputs[0].IsDNNLData())
in_buffer = inputs[0].Reorder2Default();
auto i_mem = in_buffer.GetDNNLData();
auto i_desc = i_mem->get_desc();
auto o_desc = i_desc;
o_desc.data.data_type = get_dnnl_type_t<DstType>();
auto reorder_pd = dnnl::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc, attr);
auto o_mem = CreateDNNLMem(outputs[0], o_desc, req[0]);
DNNLStream::Get()->RegisterPrimArgs(dnnl::reorder(reorder_pd),
{{DNNL_ARG_FROM, *i_mem}, {DNNL_ARG_TO, *o_mem.second}});
CommitOutput(outputs[0], o_mem);
DNNLStream::Get()->Submit();
}
static void DNNLRequantizeForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
using red::limits::MaxValue;
using red::limits::MinValue;
typedef int32_t SrcDType;
typedef int8_t DstDType;
const RequantizeParam& param = nnvm::get<RequantizeParam>(attrs.parsed);
float real_range;
// Model is calibrated
if (param.min_calib_range.has_value() && param.max_calib_range.has_value()) {
real_range = MaxAbs(param.min_calib_range.value(), param.max_calib_range.value());
// Model is not calibrated
} else {
NDArray in_buffer = inputs[0].Reorder2Default();
auto in_ptr = in_buffer.data().dptr<SrcDType>();
auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
SrcDType data_min = MaxValue<SrcDType>();
SrcDType data_max = MinValue<SrcDType>();
std::vector<SrcDType> data_maxs(nthreads, data_max);
std::vector<SrcDType> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
int tid = omp_get_thread_num();
if (in_ptr[i] > data_maxs[tid])
data_maxs[tid] = in_ptr[i];
if (in_ptr[i] < data_mins[tid])
data_mins[tid] = in_ptr[i];
}
for (index_t i = 0; i < nthreads; i++) {
if (data_maxs[i] > data_max)
data_max = data_maxs[i];
if (data_mins[i] < data_min)
data_min = data_mins[i];
}
float src_range = MinAbs(MinValue<SrcDType>(), MaxValue<SrcDType>());
SrcDType data_range = MaxAbs(data_min, data_max);
float data_scale = MaxAbs(*inputs[1].data().dptr<float>(), *inputs[2].data().dptr<float>());
real_range = data_range * data_scale / src_range;
}
auto out_type = GetQuantizeOutputType(param);
if (out_type == mshadow::kUint8) {
DNNLRequantizeForwardKer<uint8_t>(attrs, ctx, inputs, req, outputs, real_range);
} else if (out_type == mshadow::kInt8) {
DNNLRequantizeForwardKer<int8_t>(attrs, ctx, inputs, req, outputs, real_range);
} else {
LOG(FATAL) << "oneDNN requantize op only supports int8 and uint8 as output type";
}
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_REQUANTIZE_INL_H_
|
omp_parallel_for_firstprivate.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
int test_omp_parallel_for_firstprivate()
{
int sum;
int i2;
int i;
int known_sum;
sum=0;
i2=3;
#pragma omp parallel for reduction(+:sum) private(i) firstprivate(i2)
for (i = 1; i <= LOOPCOUNT; i++) {
sum = sum + (i + i2);
}
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 + i2 * LOOPCOUNT;
return (known_sum == sum);
} /* end of check_parallel_for_firstprivate */
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_parallel_for_firstprivate()) {
num_failed++;
}
}
return num_failed;
}
|
GB_binop__minus_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_int8
// A.*B function (eWiseMult): GB_AemultB__minus_int8
// A*D function (colscale): GB_AxD__minus_int8
// D*A function (rowscale): GB_DxB__minus_int8
// C+=B function (dense accum): GB_Cdense_accumB__minus_int8
// C+=b function (dense accum): GB_Cdense_accumb__minus_int8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_int8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_int8
// C=scalar+B GB_bind1st__minus_int8
// C=scalar+B' GB_bind1st_tran__minus_int8
// C=A+scalar GB_bind2nd__minus_int8
// C=A'+scalar GB_bind2nd_tran__minus_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT8 || GxB_NO_MINUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__minus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__minus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__minus_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__minus_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__minus_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__minus_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__minus_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__minus_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__minus_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__minus_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x - aij) ; \
}
GrB_Info GB_bind1st_tran__minus_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij - y) ; \
}
GrB_Info GB_bind2nd_tran__minus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
morn_wave_FFT.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
*/
#include "morn_wave.h"
#define FFTCACL0(re0,re1) {\
register float re_mul=re1;\
re1=re0-re_mul;\
re0=re0+re_mul;\
}
#define FFTCACL1(re0,im0,re1,im1,re2,im2) {\
register float re_mul=Wre[k]*re1-Wim[k]*im1;\
register float im_mul=Wim[k]*re1+Wre[k]*im1;\
re2=re0-re_mul;im2=-im0+im_mul;\
re0=re0+re_mul;im0= im0+im_mul;\
}
#define FFTCACL2(im0,re1) {\
im0=-re1;\
}
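/* The three macros above are the radix-2 butterfly steps used by the
real-input FFT below: FFTCACL0 is the k=0 butterfly (twiddle factor W^0 = 1,
real parts only); FFTCACL1 multiplies (re1,im1) by the precomputed twiddle
(Wre[k],Wim[k]) and stores the sum in place and the difference in the
mirrored bin, exploiting the conjugate symmetry of a real-input spectrum;
FFTCACL2 handles the middle bin, whose twiddle factor is -i. */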
void WaveFFT8(float *fft_re,float *fft_im,float d0,float d4,float d2,float d6,float d1,float d5,float d3,float d7)
{
float data15 = d1-d5;float data37=d3-d7;
float a = 0.70710678118654752440084436210485*(data15-data37);
float b = 0.70710678118654752440084436210485*(data15+data37);
float c = d0-d4;float d = d2-d6;float e = d0+d4;float f = d2+d6;float g = d1+d5;float h = d3+d7;float i=e+f;float j=g+h;
fft_re[0]= i+j; fft_im[0]= 0;
fft_re[1]= c+a; fft_im[1]=-d-b;
fft_re[2]= e-f; fft_im[2]= h-g;
fft_re[3]= c-a; fft_im[3]= d-b;
fft_re[4]= i-j; fft_im[4]= 0;
}
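/* WaveFFT8 is an unrolled 8-point DFT of real inputs, already supplied in
bit-reversed order (d0,d4,d2,d6,d1,d5,d3,d7). Only bins 0..4 are written;
the caller reconstructs bins 5..7 from conjugate symmetry. The constant
0.70710678... is cos(pi/4). */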
struct HandleWaveFFT {
int size;
float *Wre;
float *Wim;
int *order;
}HandleWaveFFT;
void endWaveFFT(void *info)
{
struct HandleWaveFFT *handle=(struct HandleWaveFFT *)info;
if(handle->Wre != NULL)mFree(handle->Wre);
if(handle->Wim != NULL)mFree(handle->Wim);
if(handle->order!=NULL)mFree(handle->order);
}
#define HASH_WaveFFT 0xf197b3ec
void mWaveFFT(MWave *src,MWave *fft)
{
int i,j,k,n;
mException((INVALID_WAVE(src)),EXIT,"invalid input");
// mException((mInfoGet(&(src->info),"wave_type") != MORN_WAVE_TD),EXIT,"invalid input");
int N;
MHandle *hdl=mHandle(src,WaveFFT);
struct HandleWaveFFT *handle = (struct HandleWaveFFT *)(hdl->handle);
if(hdl->valid == 0)
{
mException((src->size<=4),EXIT,"invalid input");
k=1;while(src->size>(2<<k))k=k+1; N=(2<<k);
if(handle->size != N)
{
handle->size = N;
if(handle->order!=NULL) mFree(handle->order);handle->order=(int *)mMalloc(N*sizeof(int));
N=N>>1;handle->order[0]=0;j=1;
for(k=N;k>0;k=k>>1) {for(i=0;i<j;i++) handle->order[i+j]=handle->order[i]+k; j=j+j;}
if(handle->Wre!=NULL) mFree(handle->Wre);handle->Wre=(float *)mMalloc(N*sizeof(float));
if(handle->Wim!=NULL) mFree(handle->Wim);handle->Wim=(float *)mMalloc(N*sizeof(float));
double n_pi = MORN_PI/((double)N);double thta = n_pi;
handle->Wre[0] = 1.0f; handle->Wim[0] = 0.0f;
for(k=1;k<N;k++)
{
handle->Wre[k] = (float)cos(thta);
handle->Wim[k] = 0.0f-(float)sin(thta);
thta = thta + n_pi;
}
}
hdl->valid = 1;
}
N = handle->size;
float *Wre = handle->Wre; float *Wim = handle->Wim;
MWave *p=fft;
if((fft==NULL)||(fft == src)) fft = mWaveCreate(((src->channel)<<1),N,NULL);
else mWaveRedefine(fft,((src->channel)<<1),N,fft->data);
int wave_type=MORN_WAVE_FD; mPropertyWrite(fft,"wave_type",&wave_type,sizeof(int));
int normalize=MORN_NOT_NORMALIZED;mPropertyWrite(fft,"normalize",&normalize,sizeof(int));
N=(N>>1);
for(int cn=0;cn<src->channel;cn++)
{
float *FFTDataRe = fft->data[(cn<<1)];
float *FFTDataIm = fft->data[(cn<<1)+1];
float *data = src->data[cn];
for(i=0;i<N+N;i+=8)
{
int n0=handle->order[i ];int n1=handle->order[i+1];int n2=handle->order[i+2];int n3=handle->order[i+3];
int n4=handle->order[i+4];int n5=handle->order[i+5];int n6=handle->order[i+6];int n7=handle->order[i+7];
WaveFFT8(FFTDataRe+i,FFTDataIm+i,data[n0],(n1>=src->size)?0:data[n1],data[n2],(n3>=src->size)?0:data[n3],
data[n4],(n5>=src->size)?0:data[n5],data[n6],(n7>=src->size)?0:data[n7]);
}
for(n=8;n<=N;n=(n<<1))for(int j=0;j<N+N;j=j+n+n)
{
FFTCACL0(FFTDataRe[j],FFTDataRe[j+n]);
for(i=1,k=N/n;i<(n>>1);i++,k=k+N/n)
{
FFTCACL1(FFTDataRe[j+i],FFTDataIm[j+i],FFTDataRe[j+n+i],FFTDataIm[j+n+i],FFTDataRe[j+n-i],FFTDataIm[j+n-i]);
}
FFTCACL2(FFTDataIm[j+i],FFTDataRe[j+n+i]);
}
for(int i=N+1;i<N+N;i++) {FFTDataRe[i]=FFTDataRe[N+N-i];FFTDataIm[i]=0-FFTDataIm[N+N-i];}
}
if(p!=fft) {mWaveExchange(src,fft);mWaveRelease(fft);}
}
#define FFTCACL(re0,im0,re1,im1) {\
register float re_mul=Wre[k]*re1-Wim[k]*im1;\
register float im_mul=Wim[k]*re1+Wre[k]*im1;\
re1=re0-re_mul;im1=im0-im_mul;\
re0=re0+re_mul;im0=im0+im_mul;\
}
/*
void mWaveFFT(MWave *src,MWave *fft)
{
int i,j,k,n;
mException((INVALID_WAVE(src)),EXIT,"invalid input");
// mException((mInfoGet(&(src->info),"wave_type") != MORN_WAVE_TD),EXIT,"invalid input");
int N;
MHandle *hdl; ObjectHandle(src,WaveFFT,hdl);
struct HandleWaveFFT *handle = hdl->handle;
if(hdl->valid == 0)
{
mException((src->size<=4),EXIT,"invalid input");
k=1;while(src->size>(2<<k))k=k+1; N=(2<<k);
if(handle->size != N)
{
handle->size = N;
if(handle->order!=NULL) mFree(handle->order);handle->order=mMalloc(N*sizeof(int));
N=N>>1;handle->order[0]=0;j=1;
for(k=N;k>0;k=k>>1) {for(i=0;i<j;i++) handle->order[i+j]=handle->order[i]+k; j=j+j;}
if(handle->Wre!=NULL) mFree(handle->Wre);handle->Wre=mMalloc(N*sizeof(float));
if(handle->Wim!=NULL) mFree(handle->Wim);handle->Wim=mMalloc(N*sizeof(float));
double n_pi = MORN_PI/((double)N);double thta = n_pi;
handle->Wre[0] = 1.0f; handle->Wim[0] = 0.0f;
for(k=1;k<N;k++)
{
handle->Wre[k] = (float)cos(thta);
handle->Wim[k] = 0.0f-(float)sin(thta);
thta = thta + n_pi;
}
}
hdl->valid = 1;
}
N = handle->size;
float *Wre = handle->Wre; float *Wim = handle->Wim;
MWave *p=fft;
if((fft==NULL)||(fft == src)) fft = mWaveCreate(((src->channel)<<1),N,NULL);
else mWaveRedefine(fft,((src->channel)<<1),N,fft->data);
fft->info = src->info;
mInfoSet(&(fft->info),"wave_type",MORN_WAVE_FD);
mInfoSet(&(fft->info),"normalize",MORN_NOT_NORMALIZED);
N=(N>>1);
for(int cn=0;cn<src->channel;cn++)
{
float *FFTDataRe = fft->data[(cn<<1)];
float *FFTDataIm = fft->data[(cn<<1)+1];
float *data = src->data[cn];
for(i=0;i<N+N;i+=8)
{
int n0=handle->order[i ];int n1=handle->order[i+1];int n2=handle->order[i+2];int n3=handle->order[i+3];
int n4=handle->order[i+4];int n5=handle->order[i+5];int n6=handle->order[i+6];int n7=handle->order[i+7];
WaveFFT8(FFTDataRe+i,FFTDataIm+i,(n0>src->size)?0:data[n0],(n1>src->size)?0:data[n1],(n2>src->size)?0:data[n2],(n3>src->size)?0:data[n3],
(n4>src->size)?0:data[n4],(n5>src->size)?0:data[n5],(n6>src->size)?0:data[n6],(n7>src->size)?0:data[n7]);
}
for(n=8;n<=N;n=(n<<1))
{
for(j=0;j<(N<<1);j=j+(n<<1))
{
for(i=0,k=0;i<=(n>>1);i++,k=k+N/n)
FFTCACL(FFTDataRe[j+i],FFTDataIm[j+i],FFTDataRe[j+i+n],FFTDataIm[j+i+n]);
for(;i<n;i++)
{
FFTDataRe[j+i] = FFTDataRe[j+n+n-i];FFTDataIm[j+i] =-FFTDataIm[j+n+n-i];
FFTDataRe[j+i+n] = FFTDataRe[j+n-i];FFTDataIm[j+i+n] =-FFTDataIm[j+n-i];
}
}
}
}
if(p!=fft) {mWaveExchange(src,fft);mWaveRelease(fft);}
}
*/
#define HandleWaveIFFT HandleWaveFFT
#define endWaveIFFT endWaveFFT
#define HASH_WaveIFFT 0x81f00b75
void mWaveIFFT(MWave *fft,MWave *dst)
{
int i,j,k,n;
mException((INVALID_WAVE(fft)),EXIT,"invalid input");
int wave_type=0;mPropertyRead(fft,"wave_type",&wave_type);
mException((wave_type != MORN_WAVE_FD),EXIT,"invalid input");
int N;
MHandle *hdl=mHandle(fft,WaveIFFT);
struct HandleWaveIFFT *handle = (struct HandleWaveIFFT *)(hdl->handle);
if(hdl->valid == 0)
{
N=fft->size;mException((N<4)||((N&(N-1))!=0),EXIT,"invalid input");
if(handle->size != N)
{
handle->size = N;
if(handle->order!=NULL) mFree(handle->order);handle->order=(int *)mMalloc(N*sizeof(int));
N=N>>1;handle->order[0]=0;j=1;
for(k=N;k>0;k=k>>1) {for(i=0;i<j;i++) handle->order[i+j]=handle->order[i]+k; j=j+j;}
if(handle->Wre!=NULL) mFree(handle->Wre);handle->Wre=(float *)mMalloc(N*sizeof(float));
if(handle->Wim!=NULL) mFree(handle->Wim);handle->Wim=(float *)mMalloc(N*sizeof(float));
double n_pi = MORN_PI/((double)N);double thta = n_pi;
handle->Wre[0] = 1.0f; handle->Wim[0] = 0.0f;
for(k=1;k<N;k++)
{
handle->Wre[k] = (float)cos(thta);
handle->Wim[k] = (float)sin(thta);
thta = thta + n_pi;
}
}
hdl->valid = 1;
}
N = handle->size;
float *Wre = handle->Wre; float *Wim = handle->Wim;
MWave *p=dst;
if((dst==NULL)||(dst==fft)) dst = mWaveCreate(fft->channel,N,NULL);
else mWaveRedefine(dst,fft->channel,N,dst->data);
// dst->info = fft->info;
wave_type=MORN_WAVE_TD; mPropertyWrite(dst,"wave_type",&wave_type,sizeof(int));
int normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(int));
N=(N>>1);
for(int cn=0;cn<dst->channel;cn+=2)
{
float *FFTDataRe = dst->data[cn];
float *FFTDataIm = dst->data[cn+1];
float *fft_data_re=fft->data[cn];
float *fft_data_im=fft->data[cn+1];
for(i=0;i<N+N;i++)
{
int n=handle->order[i];
FFTDataRe[i]=fft_data_re[n];FFTDataIm[i]=fft_data_im[n];
}
for(n=1;n<=N;n=(n<<1))
for(j=0;j<(N<<1);j=j+(n<<1))
for(i=0,k=0;i<n;i++,k=k+N/n)
FFTCACL(FFTDataRe[j+i],FFTDataIm[j+i],FFTDataRe[j+i+n],FFTDataIm[j+i+n]);
for(i=0;i<N+N;i++) dst->data[cn>>1][i] = FFTDataRe[i]/((float)(N+N));
}
dst->channel=dst->channel>>1;
if(p!=dst) {mWaveExchange(fft,dst);mWaveRelease(dst);}
}
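/* A minimal usage sketch (an illustration, not part of the original file):
round-trip a time-domain wave through mWaveFFT and mWaveIFFT. Passing NULL
(or the wave itself) as the second argument makes both functions operate in
place, as handled above.

MWave *wav = mWaveCreate(1,1024,NULL);
// ... fill wav->data[0][0..1023] with samples ...
mWaveFFT(wav,NULL); // wav now holds two channels: Re and Im
mWaveIFFT(wav,NULL); // back to one time-domain channel
*/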
/*
void mWaveIFFT0(MWave *fft,MWave *dst)
{
int i,j,k,n;
int cn;
int wave_size;
double *DstDataRe;
double *DstDataIm;
int N;
double *Wre,*Wim;
int out_valid;
mException((INVALID_WAVE(fft)),EXIT,"invalid input");
mException((mInfoGet(&(fft->info),"wave_type") != MORN_WAVE_FD),EXIT,"invalid input");
wave_size = fft->size;
N = wave_size;
while((N&0x01)==0)
N = N>>1;
mException((N!=1),EXIT,"invalid input data");
N = wave_size;
if((INVALID_POINTER(dst))||(dst == fft))
{
out_valid = 0;
dst = mWaveCreate(((fft->channel)>>1),N,NULL);
}
else
{
out_valid = 1;
mWaveRedefine(dst,((fft->channel)>>1),N,dst->data);
}
dst->info = fft->info;
mInfoSet(&(dst->info),"wave_type",MORN_WAVE_TD);
DstDataRe = (double *)mMalloc(N*sizeof(double));
DstDataIm = (double *)mMalloc(N*sizeof(double));
N=(N>>1);
Wre = (double *)mMalloc(N*sizeof(double));
Wim = (double *)mMalloc(N*sizeof(double));
for(k=0;k<N;k++)
{
Wre[k] = cos((((double)(k))/((double)(N)))*MORN_PI);
Wim[k] = sin((((double)(k))/((double)(N)))*MORN_PI);
}
for(cn=0;cn<fft->channel;cn=cn+2)
{
DstDataRe[0] = fft->data[cn][0];
DstDataRe[N] = fft->data[cn][1];
DstDataIm[0] = fft->data[cn+1][0];
DstDataIm[N] = fft->data[cn+1][1];
for(i=1,j=N;i<N;i++)
{
printf("i is %d,j is %d\n",i,j);
printf("i is %d,j is %d\n",i+N,j+1);
DstDataRe[i] = fft->data[cn][j];
DstDataRe[i+N] = fft->data[cn][j+1];
DstDataIm[i] = fft->data[cn+1][j];
DstDataIm[i+N] = fft->data[cn+1][j+1];
k=N;
while(k<=j)
{
j=j-k;
k=k/2;
}
j=j+k;
}
for(i=0;i<N+N;i++)
printf("data is %f+%fi\n",DstDataRe[i],DstDataIm[i]);
for(n=1;n<=N;n=(n<<1))
{
//#pragma omp parallel for
for(j=0;j<(N<<1);j=j+(n<<1))
for(i=0,k=0;i<n;i++,k=k+N/n)
FFTCACL(DstDataRe[j+i],DstDataIm[j+i],DstDataRe[j+i+n],DstDataIm[j+i+n]);
}
for(i=0;i<wave_size;i++)
dst->data[cn>>1][i] = (float)(DstDataRe[i]/((double)wave_size));
}
mFree(Wre);
mFree(Wim);
mFree(DstDataRe);
mFree(DstDataIm);
if(!out_valid)
{
mWaveExchange(fft,dst);
mWaveRelease(dst);
}
}
*/
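/* mWavePowerSpectrum folds each interleaved Re/Im channel pair of an FFT
result into one power-spectrum channel. The three modes map to:
MORN_SQUAR_POWERS -> Re^2+Im^2, MORN_POWERS -> sqrt(Re^2+Im^2), and
MORN_LOG_POWERS -> log10(Re^2+Im^2)/2, i.e. the base-10 log of the
magnitude. */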
void mWavePowerSpectrum(MWave *fft,MWave *ps,int mode)
{
int wav_size;
float *re,*im;
float *ps_data;
int i,j;
if(mode == MORN_DEFAULT)
mode = MORN_SQUAR_POWERS;
mException(((mode<1)||(mode>3)),EXIT,"invalid input");
mException((INVALID_WAVE(fft)),EXIT,"invalid input");
int wave_type=0;mPropertyRead(fft,"wave_type",&wave_type);
mException((wave_type != MORN_WAVE_FD),EXIT,"invalid input");
wav_size = (fft->size)>>1;
if(INVALID_POINTER(ps))
ps = fft;
mWaveRedefine(ps,((fft->channel)>>1),wav_size,ps->data);
// ps->info = fft->info;
wave_type=MORN_WAVE_PS; mPropertyWrite(ps,"wave_type",&wave_type,sizeof(int));
int normalize=MORN_NOT_NORMALIZED;mPropertyWrite(ps,"normalize",&normalize,sizeof(int));
if(mode == MORN_SQUAR_POWERS)
{
for(j=0;j<ps->channel;j++)
{
re = fft->data[(j<<1)];
im = fft->data[(j<<1)+1];
ps_data = ps->data[j];
for(i=0;i<wav_size;i++)
ps_data[i] = re[i]*re[i] + im[i]*im[i];
}
}
else if(mode == MORN_POWERS)
{
for(j=0;j<ps->channel;j++)
{
re = fft->data[(j<<1)];
im = fft->data[(j<<1)+1];
ps_data = ps->data[j];
for(i=0;i<wav_size;i++)
ps_data[i] = (float)sqrt((double)(re[i]*re[i] + im[i]*im[i]));
}
}
else if(mode == MORN_LOG_POWERS)
{
for(j=0;j<ps->channel;j++)
{
re = fft->data[(j<<1)];
im = fft->data[(j<<1)+1];
ps_data = ps->data[j];
for(i=0;i<wav_size;i++)
ps_data[i] = (log10((double)(re[i]*re[i] + im[i]*im[i])))/2.0;
}
}
}
void mWaveFrequencyComponent(MWave *src,float frequency,float *component)
{
float src_frequency;mException(mPropertyRead(src,"frequency",&src_frequency)==NULL,EXIT,"invalid input");
float c = (MORN_PI+MORN_PI)*frequency/src_frequency;
for(int cn=0;cn<src->channel;cn++)
{
float *data = src->data[cn];
float e = 0;
float re = data[0];
float im = 0.0f;
for(int i=1;i<src->size;i++)
{
e = e+c;
re = re + data[i]*cos(e);
im = im - data[i]*sin(e);
}
component[cn] =re*re+im*im;
}
}
struct HandleWaveFrequencyAnalyse
{
int src_frequency;
int num;
float *frequency;
MMatrix *re_mat;
MMatrix *im_mat;
}HandleWaveFrequencyAnalyse;
#define HASH_WaveFrequencyAnalyse 0x77f2456d
void endWaveFrequencyAnalyse(void *info)
{
struct HandleWaveFrequencyAnalyse *handle = (struct HandleWaveFrequencyAnalyse *)info;
if(handle->frequency!=NULL) mFree(handle->frequency);
if(handle->re_mat != NULL) mMatrixRelease(handle->re_mat);
if(handle->im_mat != NULL) mMatrixRelease(handle->im_mat);
}
void mWaveFrequencyAnalyse(MWave *src,float *frequency,int num,float **component)
{
int cn,i,j;
MHandle *hdl=mHandle(src,WaveFrequencyAnalyse);
struct HandleWaveFrequencyAnalyse *handle = (struct HandleWaveFrequencyAnalyse *)(hdl->handle);
if(hdl->valid ==1)
{
if(num <=0) num = handle->num;
if(INVALID_POINTER(frequency)) frequency = handle->frequency;
float src_frequency=-1;mException(mPropertyRead(src,"frequency",&src_frequency)==NULL,EXIT,"invalid input");
if((handle->src_frequency != src_frequency)&&(src_frequency >0))
hdl->valid = 0;
else if(frequency != handle->frequency)
if(memcmp(handle->frequency,frequency,num*sizeof(float))!=0)
hdl->valid = 0;
}
if(hdl->valid == 0)
{
mException((num<=0)||(INVALID_POINTER(frequency)),EXIT,"invalid input");
handle->src_frequency=-1;mPropertyRead(src,"frequency",&(handle->src_frequency));
mException((handle->src_frequency<=0),EXIT,"invalid input");
if(num>handle->num) {mFree(handle->frequency);handle->frequency=NULL;}
if(handle->frequency==NULL) handle->frequency=(float *)mMalloc(num*sizeof(float));
handle->num = num;
if(handle->re_mat == NULL) handle->re_mat = mMatrixCreate(num,src->size,NULL);
else mMatrixRedefine(handle->re_mat,num,src->size,NULL);
if(handle->im_mat == NULL) handle->im_mat = mMatrixCreate(num,src->size,NULL);
else mMatrixRedefine(handle->im_mat,num,src->size,NULL);
memcpy(handle->frequency,frequency,num*sizeof(float));
for(j=0;j<num;j++)
{
double c = ((double)(MORN_PI+MORN_PI)*(handle->frequency[j]))/((double)(handle->src_frequency));
double e = 0.0;
handle->re_mat->data[j][0] = 1.0f;
handle->im_mat->data[j][0] = 0.0f;
for(i=1;i<src->size;i++)
{
e = e+c;
handle->re_mat->data[j][i] = cos(e);
handle->im_mat->data[j][i] = 0.0f - sin(e);
}
}
hdl->valid = 1;
}
MMatrix *re_mat = handle->re_mat;
MMatrix *im_mat = handle->im_mat;
for(cn=0;cn<src->channel;cn++)
{
for(j=0;j<num;j++)
{
float re = 0.0f;
float im = 0.0f;
for(i=0;i<src->size;i++)
{
re = re + src->data[cn][i]*re_mat->data[j][i];
im = im + src->data[cn][i]*im_mat->data[j][i];
}
component[cn][j] =re*re+im*im;
}
}
}
/*
void mWaveFrequencyAnalyse2(MWave *src,float *frequency,int num,float **component)
{
float *e,*c;
int i,j,cn;
float *re,*im;
float *data;
e = (float *)mMalloc(num*sizeof(float));
c = (float *)mMalloc(num*sizeof(float));
re = (float *)mMalloc(num*sizeof(float));
im = (float *)mMalloc(num*sizeof(float));
for(cn=0;cn<src->channel;cn++)
{
data = src->data[cn];
for(j=0;j<num;j++)
{
c[j] = (MORN_PI+MORN_PI)*frequency[j]/((float)(src->info.frequency));
e[j] = 0;
re[j] = data[0];
im[j] = 0.0;
}
for(i=1;i<src->size;i++)
for(j=0;j<num;j++)
{
e[j] = e[j]+c[j];
re[j] = re[j] + data[i]*cos(e[j]);
im[j] = im[j] - data[i]*sin(e[j]);
}
for(j=0;j<num;j++)
{
// printf("re[j] is %f,im[j] is %f\n",re[j],im[j]);
component[cn][j] =re[j]*re[j]+im[j]*im[j];
}
}
mFree(e);
mFree(c);
mFree(re);
mFree(im);
}
*/
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
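/* Typical usage, as in main() below: bracket the timed region with
gettimeofday and convert the difference to seconds.

struct timeval start, end, result;
gettimeofday(&start, 0);
// ... timed work ...
gettimeofday(&end, 0);
timeval_subtract(&result, &end, &start);
double seconds = result.tv_sec + result.tv_usec * 1.0e-6;
*/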
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
return -1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
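/* A hedged sketch (not part of the original benchmark): for a fixed t the
grid-point updates are independent, so the spatial loops above could be
parallelized over the outermost spatial dimension, e.g.

#pragma omp parallel for private(j, k)
for (i = 1; i < Nz-1; i++)
for (j = 1; j < Ny-1; j++)
for (k = 1; k < Nx-1; k++)
... same update as above ...

The time loop carries a dependence between iterations and must stay serial. */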
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing caused performance degradation in timing runs)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_binop__eq_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_uint8
// A.*B function (eWiseMult): GB_AemultB__eq_uint8
// A*D function (colscale): GB_AxD__eq_uint8
// D*A function (rowscale): GB_DxB__eq_uint8
// C+=B function (dense accum): GB_Cdense_accumB__eq_uint8
// C+=b function (dense accum): GB_Cdense_accumb__eq_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_uint8
// C=scalar+B GB_bind1st__eq_uint8
// C=scalar+B' GB_bind1st_tran__eq_uint8
// C=A+scalar GB_bind2nd__eq_uint8
// C=A'+scalar GB_bind2nd_tran__eq_uint8
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT8 || GxB_NO_EQ_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__eq_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__eq_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__eq_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__eq_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__eq_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__eq_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__eq_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__eq_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__eq_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__eq_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__eq_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GrB_Matrix_serialize.c | //------------------------------------------------------------------------------
// GrB_Matrix_serialize: copy a matrix into a serialized array of bytes
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// serialize a GrB_Matrix into a blob of bytes
// This method is similar to GxB_Matrix_serialize. In contrast with the GrB*
// method, this method requires the user application to allocate the blob
// first, which must be non-NULL on input. The required size of the blob is
// computed by GrB_Matrix_serializeSize. Example usage:
/*
void *blob = NULL ;
GrB_Index blob_size = 0 ;
GrB_Matrix A, B = NULL ;
// construct a matrix A, then serialize it:
GrB_Matrix_serializeSize (&blob_size, A) ; // loose upper bound
blob = malloc (blob_size) ; // user mallocs the blob
GrB_Matrix_serialize (blob, &blob_size, A) ; // returns actual size
blob = realloc (blob, blob_size) ; // user can shrink the blob
GrB_Matrix_deserialize (&B, atype, blob, blob_size) ;
free (blob) ; // user frees the blob
*/
#include "GB.h"
#include "GB_serialize.h"
GrB_Info GrB_Matrix_serialize // serialize a GrB_Matrix to a blob
(
// output:
void *blob, // the blob, already allocated on input
// input/output:
GrB_Index *blob_size_handle, // size of the blob on input. On output,
// the # of bytes used in the blob.
// input:
GrB_Matrix A // matrix to serialize
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GrB_Matrix_serialize (blob, &blob_size, A)") ;
GB_BURBLE_START ("GrB_Matrix_serialize") ;
GB_RETURN_IF_NULL (blob) ;
GB_RETURN_IF_NULL (blob_size_handle) ;
GB_RETURN_IF_NULL_OR_FAULTY (A) ;
// no descriptor, so assume the default method
int method = GxB_DEFAULT ;
// Context will hold the default # of threads, which can be controlled
// by GxB_Global_Option_set.
//--------------------------------------------------------------------------
// serialize the matrix into the preallocated blob
//--------------------------------------------------------------------------
size_t blob_size = (size_t) (*blob_size_handle) ;
GrB_Info info = GB_serialize ((GB_void **) &blob, &blob_size, A, method,
Context) ;
if (info == GrB_SUCCESS)
{
(*blob_size_handle) = (GrB_Index) blob_size ;
}
GB_BURBLE_END ;
#pragma omp flush
return (info) ;
}
|
hello.c | #include <stdio.h>
#include <omp.h>
int main(int argc, char **argv)
{
#pragma omp parallel num_threads(6)
{
printf("Hello, multithreaded world: thread %d of %d\n",
omp_get_thread_num(), omp_get_num_threads());
}
return 0;
}
|
builder.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef BUILDER_H_
#define BUILDER_H_
#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>
#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: BuilderBase
Author: Scott Beamer
Given arguments from the command line (cli), returns a built graph
- MakeGraph() will parse cli and obtain edgelist and call
MakeGraphFromEL(edgelist) to perform actual graph construction
- edgelist can be from file (reader) or synthetically generated (generator)
- Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/
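// A minimal usage sketch (the names CLApp and Graph follow the benchmark.h
// conventions assumed here; they are not defined in this header):
//
// CLApp cli(argc, argv, "app");
// if (!cli.ParseArgs()) return -1;
// Builder b(cli);
// Graph g = b.MakeGraph();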
template <typename NodeID_, typename DestID_ = NodeID_,
typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
typedef EdgePair<NodeID_, DestID_> Edge;
typedef pvector<Edge> EdgeList;
const CLBase &cli_;
bool symmetrize_;
bool needs_weights_;
int64_t num_nodes_ = -1;
int64_t num_edges_ = 0;
int64_t base_graph_num_edges_ = 0;
public:
explicit BuilderBase(const CLBase &cli) : cli_(cli) {
symmetrize_ = cli_.symmetrize();
needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
}
DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
return e.u;
}
DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
}
NodeID_ FindMaxNodeID(const EdgeList &el) {
NodeID_ max_seen = 0;
#pragma omp parallel for reduction(max : max_seen)
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
max_seen = std::max(max_seen, e.u);
max_seen = std::max(max_seen, (NodeID_) e.v);
}
return max_seen;
}
// pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
// pvector<NodeID_> degrees(num_nodes_, 0);
// #pragma omp parallel for
// for (auto it = el.begin(); it < el.end(); it++) {
// Edge e = *it;
// if (symmetrize_ || (!symmetrize_ && !transpose))
// fetch_and_add(degrees[e.u], 1);
// if (symmetrize_ || (!symmetrize_ && transpose))
// fetch_and_add(degrees[(NodeID_) e.v], 1);
// }
// return degrees;
// }
//
// static
// pvector<SGOffset> PrefixSum(const pvector<NodeID_> °rees) {
// pvector<SGOffset> sums(degrees.size() + 1);
// SGOffset total = 0;
// for (size_t n=0; n < degrees.size(); n++) {
// sums[n] = total;
// total += degrees[n];
// }
// sums[degrees.size()] = total;
// return sums;
// }
//
// static
// pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> °rees) {
// const size_t block_size = 1<<20;
// const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
// pvector<SGOffset> local_sums(num_blocks);
// #pragma omp parallel for
// for (size_t block=0; block < num_blocks; block++) {
// SGOffset lsum = 0;
// size_t block_end = std::min((block + 1) * block_size, degrees.size());
// for (size_t i=block * block_size; i < block_end; i++)
// lsum += degrees[i];
// local_sums[block] = lsum;
// }
// pvector<SGOffset> bulk_prefix(num_blocks+1);
// SGOffset total = 0;
// for (size_t block=0; block < num_blocks; block++) {
// bulk_prefix[block] = total;
// total += local_sums[block];
// }
// bulk_prefix[num_blocks] = total;
// pvector<SGOffset> prefix(degrees.size() + 1);
// #pragma omp parallel for
// for (size_t block=0; block < num_blocks; block++) {
// SGOffset local_total = bulk_prefix[block];
// size_t block_end = std::min((block + 1) * block_size, degrees.size());
// for (size_t i=block * block_size; i < block_end; i++) {
// prefix[i] = local_total;
// local_total += degrees[i];
// }
// }
// prefix[degrees.size()] = bulk_prefix[num_blocks];
// return prefix;
// }
//
// // Removes self-loops and redundant edges
// // Side effect: neighbor IDs will be sorted
// void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
// DestID_*** sq_index, DestID_** sq_neighs) {
// pvector<NodeID_> diffs(g.num_nodes());
// DestID_ *n_start, *n_end;
// #pragma omp parallel for private(n_start, n_end)
// for (NodeID_ n=0; n < g.num_nodes(); n++) {
// if (transpose) {
// n_start = g.in_neigh(n).begin();
// n_end = g.in_neigh(n).end();
// } else {
// n_start = g.out_neigh(n).begin();
// n_end = g.out_neigh(n).end();
// }
// std::sort(n_start, n_end);
// DestID_ *new_end = std::unique(n_start, n_end);
// new_end = std::remove(n_start, new_end, n);
// diffs[n] = new_end - n_start;
// }
// pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
// *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
// *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
// #pragma omp parallel for private(n_start)
// for (NodeID_ n=0; n < g.num_nodes(); n++) {
// if (transpose)
// n_start = g.in_neigh(n).begin();
// else
// n_start = g.out_neigh(n).begin();
// std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
// }
// }
//
// CSRGraph<NodeID_, DestID_, invert> SquishGraph(
// const CSRGraph<NodeID_, DestID_, invert> &g) {
// DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
// SquishCSR(g, false, &out_index, &out_neighs);
// if (g.directed()) {
// if (invert)
// SquishCSR(g, true, &in_index, &in_neighs);
// return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
// out_neighs, in_index,
// in_neighs);
// } else {
// return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
// out_neighs);
// }
// }
//
// /*
// Graph Building Steps (for CSR):
// - Read edgelist once to determine vertex degrees (CountDegrees)
// - Determine vertex offsets by a prefix sum (ParallelPrefixSum)
// - Allocate storage and set points according to offsets (GenIndex)
// - Copy edges into storage
// */
// void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
// DestID_** neighs) {
// pvector<NodeID_> degrees = CountDegrees(el, transpose);
// pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
// *neighs = new DestID_[offsets[num_nodes_]];
// *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
// #pragma omp parallel for
// for (auto it = el.begin(); it < el.end(); it++) {
// Edge e = *it;
// if (symmetrize_ || (!symmetrize_ && !transpose))
// (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
// if (symmetrize_ || (!symmetrize_ && transpose))
// (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
// GetSource(e);
// }
// }
//
// CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
// DestID_ **index = nullptr, **inv_index = nullptr;
// DestID_ *neighs = nullptr, *inv_neighs = nullptr;
// Timer t;
// t.Start();
// if (num_nodes_ == -1)
// num_nodes_ = FindMaxNodeID(el)+1;
// if (needs_weights_)
// Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
// MakeCSR(el, false, &index, &neighs);
// if (!symmetrize_ && invert)
// MakeCSR(el, true, &inv_index, &inv_neighs);
// t.Stop();
// PrintTime("Build Time", t.Seconds());
// if (symmetrize_)
// return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
// else
// return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
// inv_index, inv_neighs);
// }
CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
EdgeList el;
Timer t;
if (cli_.base_filename() != "") {
Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.base_filename());
el = r.ReadFile(needs_weights_);
}
else {
printf("[%s]: graph input-file not exists, abort!!!\n", __FUNCTION__);
exit(0);
}
base_graph_num_edges_ = el.size();
num_nodes_ = FindMaxNodeID(el) + 1;
if(symmetrize_) {
for(int64_t i=0; i<base_graph_num_edges_; i+=1) {
el.push_back(EdgePair<NodeID_, DestID_>(static_cast<NodeID_>(el[i].v), GetSource(el[i])));
}
base_graph_num_edges_ *= 2;
}
// std::sort(el.begin(), el.end(), [](Edge &a, Edge &b) {
// if(a.u != b.u) return a.u < b.u;
// return (a.v < b.v);
// });
if (needs_weights_) Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
CSRGraph<NodeID_, DestID_, invert> g(el, !symmetrize_, base_graph_num_edges_, num_nodes_);
el.clear();
if (cli_.dynamic_filename() != "") {
Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.dynamic_filename());
el = r.ReadFile(needs_weights_);
}
else {
printf("[%s]: graph input-file not exists, abort!!!\n", __FUNCTION__);
exit(0);
}
if (needs_weights_) Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
size_t dynamic_edges = el.size();
t.Start();
for(uint32_t i=0; i<dynamic_edges; i+=1) {
g.insert(el[i].u, el[i].v.v, el[i].v.w);
if(symmetrize_) {
g.insert(el[i].v.v, el[i].u, el[i].v.w);
}
// if(i && i % 10000000 == 0) cout << "inserted " << (i/1000000) << "M dynamic edges" << endl;
}
t.Stop();
cout << "D-Graph Build Time: " << t.Seconds() << " seconds." << endl;
return g;
}
// Relabels (and rebuilds) graph by order of decreasing degree
static
CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
const CSRGraph<NodeID_, DestID_, invert> &g) {
if (g.directed()) {
std::cout << "Cannot relabel directed graph" << std::endl;
std::exit(-11);
}
Timer t;
t.Start();
typedef std::pair<int64_t, NodeID_> degree_node_p;
pvector<degree_node_p> degree_id_pairs(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++)
degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
std::greater<degree_node_p>());
pvector<NodeID_> degrees(g.num_nodes());
pvector<NodeID_> new_ids(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++) {
degrees[n] = degree_id_pairs[n].first;
new_ids[degree_id_pairs[n].second] = n;
}
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for
for (NodeID_ u=0; u < g.num_nodes(); u++) {
for (NodeID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
std::sort(index[new_ids[u]], index[new_ids[u]+1]);
}
t.Stop();
PrintTime("Relabel", t.Seconds());
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
}
};
#endif // BUILDER_H_
|
kdtree_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef RTABMAP_FLANN_KDTREE_INDEX_H_
#define RTABMAP_FLANN_KDTREE_INDEX_H_
#include <algorithm>
#include <map>
#include <cassert>
#include <cstring>
#include <stdarg.h>
#include <cmath>
#include "rtflann/general.h"
#include "rtflann/algorithms/nn_index.h"
#include "rtflann/util/dynamic_bitset.h"
#include "rtflann/util/matrix.h"
#include "rtflann/util/result_set.h"
#include "rtflann/util/heap.h"
#include "rtflann/util/allocator.h"
#include "rtflann/util/random.h"
#include "rtflann/util/saving.h"
namespace rtflann
{
struct KDTreeIndexParams : public IndexParams
{
KDTreeIndexParams(int trees = 4)
{
(*this)["algorithm"] = FLANN_INDEX_KDTREE;
(*this)["trees"] = trees;
}
};
/**
* Randomized kd-tree index
*
* Contains the k-d trees and other information for indexing a set of points
* for nearest-neighbor matching.
*/
template <typename Distance>
class KDTreeIndex : public NNIndex<Distance>
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
typedef NNIndex<Distance> BaseClass;
typedef bool needs_kdtree_distance;
private:
/*--------------------- Internal Data Structures --------------------------*/
struct Node
{
/**
* Dimension used for subdivision.
*/
int divfeat;
/**
* The values used for subdivision.
*/
DistanceType divval;
/**
* Point data
*/
ElementType* point;
/**
* The child nodes.
*/
Node* child1, *child2;
Node(){
child1 = NULL;
child2 = NULL;
}
~Node() {
if (child1 != NULL) { child1->~Node(); child1 = NULL; }
if (child2 != NULL) { child2->~Node(); child2 = NULL; }
}
private:
template<typename Archive>
void serialize(Archive& ar)
{
typedef KDTreeIndex<Distance> Index;
Index* obj = static_cast<Index*>(ar.getObject());
ar & divfeat;
ar & divval;
bool leaf_node = false;
if (Archive::is_saving::value) {
leaf_node = ((child1==NULL) && (child2==NULL));
}
ar & leaf_node;
if (leaf_node) {
if (Archive::is_loading::value) {
point = obj->points_[divfeat];
}
}
if (!leaf_node) {
if (Archive::is_loading::value) {
child1 = new(obj->pool_) Node();
child2 = new(obj->pool_) Node();
}
ar & *child1;
ar & *child2;
}
}
friend struct serialization::access;
};
typedef Node* NodePtr;
typedef BranchStruct<NodePtr, DistanceType> BranchSt;
typedef BranchSt* Branch;
public:
/**
* KDTree constructor
*
* Params:
* inputData = dataset with the input features
* params = parameters passed to the kdtree algorithm
*/
KDTreeIndex(const IndexParams& params = KDTreeIndexParams(), Distance d = Distance() ) :
BaseClass(params, d), mean_(NULL), var_(NULL)
{
trees_ = get_param(index_params_,"trees",4);
}
/**
* KDTree constructor
*
* Params:
* inputData = dataset with the input features
* params = parameters passed to the kdtree algorithm
*/
KDTreeIndex(const Matrix<ElementType>& dataset, const IndexParams& params = KDTreeIndexParams(),
Distance d = Distance() ) : BaseClass(params,d ), mean_(NULL), var_(NULL)
{
trees_ = get_param(index_params_,"trees",4);
setDataset(dataset);
}
KDTreeIndex(const KDTreeIndex& other) : BaseClass(other),
trees_(other.trees_)
{
tree_roots_.resize(other.tree_roots_.size());
for (size_t i=0;i<tree_roots_.size();++i) {
copyTree(tree_roots_[i], other.tree_roots_[i]);
}
}
KDTreeIndex& operator=(KDTreeIndex other)
{
this->swap(other);
return *this;
}
/**
* Standard destructor
*/
virtual ~KDTreeIndex()
{
freeIndex();
}
BaseClass* clone() const
{
return new KDTreeIndex(*this);
}
using BaseClass::buildIndex;
void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
{
assert(points.cols==veclen_);
size_t old_size = size_;
extendDataset(points);
if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) {
buildIndex();
}
else {
for (size_t i=old_size;i<size_;++i) {
for (int j = 0; j < trees_; j++) {
addPointToTree(tree_roots_[j], i);
}
}
}
}
flann_algorithm_t getType() const
{
return FLANN_INDEX_KDTREE;
}
template<typename Archive>
void serialize(Archive& ar)
{
ar.setObject(this);
ar & *static_cast<NNIndex<Distance>*>(this);
ar & trees_;
if (Archive::is_loading::value) {
tree_roots_.resize(trees_);
}
for (size_t i=0;i<tree_roots_.size();++i) {
if (Archive::is_loading::value) {
tree_roots_[i] = new(pool_) Node();
}
ar & *tree_roots_[i];
}
if (Archive::is_loading::value) {
index_params_["algorithm"] = getType();
index_params_["trees"] = trees_;
}
}
void saveIndex(FILE* stream)
{
serialization::SaveArchive sa(stream);
sa & *this;
}
void loadIndex(FILE* stream)
{
freeIndex();
serialization::LoadArchive la(stream);
la & *this;
}
/**
* Computes the index memory usage
* Returns: memory used by the index
*/
int usedMemory() const
{
return int(pool_.usedMemory+pool_.wastedMemory+size_*sizeof(int)); // pool memory and vind array memory
}
/**
* Find set of nearest neighbors to vec. Their indices are stored inside
* the result object.
*
* Params:
* result = the result object in which the indices of the nearest-neighbors are stored
* vec = the vector for which to search the nearest neighbors
* maxChecks = the maximum number of restarts (in a best-bin-first manner)
*/
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const
{
int maxChecks = searchParams.checks;
float epsError = 1+searchParams.eps;
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
if (removed_) {
getExactNeighbors<true>(result, vec, epsError);
}
else {
getExactNeighbors<false>(result, vec, epsError);
}
}
else {
if (removed_) {
getNeighbors<true>(result, vec, maxChecks, epsError);
}
else {
getNeighbors<false>(result, vec, maxChecks, epsError);
}
}
}
#ifdef FLANN_KDTREE_MEM_OPT
/**
* Find set of nearest neighbors to vec. Their indices are stored inside
* the result object.
*
* Params:
* result = the result object in which the indices of the nearest-neighbors are stored
* vec = the vector for which to search the nearest neighbors
* maxChecks = the maximum number of restarts (in a best-bin-first manner)
*/
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams, Heap<BranchSt>* heap) const
{
int maxChecks = searchParams.checks;
float epsError = 1+searchParams.eps;
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
if (removed_) {
getExactNeighbors<true>(result, vec, epsError);
}
else {
getExactNeighbors<false>(result, vec, epsError);
}
}
else {
if (removed_) {
getNeighbors<true>(result, vec, maxChecks, epsError, heap);
}
else {
getNeighbors<false>(result, vec, maxChecks, epsError, heap);
}
}
}
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
virtual int knnSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(indices.cols >= knn);
assert(dists.cols >= knn);
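// Heap-backed result sets cost more per insertion but scale better for
// large knn; unless the caller forces a choice via params.use_heap, the
// KNN_HEAP_THRESHOLD crossover (defined elsewhere in rtflann) decides.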
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
int count = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
if (use_heap) {
//#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
else {
//#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
delete heap;
return count;
}
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
virtual int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
int count = 0;
if (use_heap) {
//#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
else {
//#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
delete heap;
return count;
}
/**
* @brief Perform radius search
* @param[in] queries The query points
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
virtual int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
int count = 0;
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
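// Three regimes follow: max_neighbors == 0 only counts matches; a negative
// params.max_neighbors with enough output columns collects every neighbor
// within the radius; otherwise results are truncated to max_neighbors.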
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
if (max_neighbors==0) {
//#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=this->size())) {
//#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
//#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
delete heap;
return count;
}
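/* Note on the sentinel marking above: when fewer than indices.cols neighbors
 * fall within the radius, the first unused slot is set to size_t(-1) and the
 * matching distance to infinity. A caller-side sketch (consume() is a
 * hypothetical callback):
 *
 *   for (size_t j = 0; j < indices.cols; ++j) {
 *       if (indices[i][j] == size_t(-1)) break;  // end of valid results
 *       consume(indices[i][j], dists[i][j]);
 *   }
 */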
/**
* @brief Perform radius search
* @param[in] queries The query points
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
virtual int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
int count = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
// just count neighbors
if (params.max_neighbors==0) {
//#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
count += resultSet.size();
}
}
}
else {
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
//#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
//#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
delete heap;
return count;
}
#endif
protected:
/**
* Builds the index
*/
void buildIndexImpl()
{
// Create a permutable array of indices to the input vectors.
std::vector<int> ind(size_);
for (size_t i = 0; i < size_; ++i) {
ind[i] = int(i);
}
mean_ = new DistanceType[veclen_];
var_ = new DistanceType[veclen_];
tree_roots_.resize(trees_);
/* Construct the randomized trees. */
for (int i = 0; i < trees_; i++) {
/* Randomize the order of vectors to allow for unbiased sampling. */
std::random_shuffle(ind.begin(), ind.end());
tree_roots_[i] = divideTree(&ind[0], int(size_) );
}
delete[] mean_;
delete[] var_;
}
void freeIndex()
{
for (size_t i=0;i<tree_roots_.size();++i) {
// using placement new, so call destructor explicitly
if (tree_roots_[i]!=NULL) tree_roots_[i]->~Node();
}
pool_.free();
}
private:
void copyTree(NodePtr& dst, const NodePtr& src)
{
dst = new(pool_) Node();
dst->divfeat = src->divfeat;
dst->divval = src->divval;
if (src->child1==NULL && src->child2==NULL) {
dst->point = points_[dst->divfeat];
dst->child1 = NULL;
dst->child2 = NULL;
}
else {
copyTree(dst->child1, src->child1);
copyTree(dst->child2, src->child2);
}
}
/**
* Create a tree node that subdivides the list of vecs from vind[first]
* to vind[last]. The routine is called recursively on each sublist.
* Place a pointer to this new tree node in the location pTree.
*
* Params: pTree = the new node to create
* first = index of the first vector
* last = index of the last vector
*/
NodePtr divideTree(int* ind, int count)
{
NodePtr node = new(pool_) Node(); // allocate memory
/* If too few exemplars remain, then make this a leaf node. */
if (count == 1) {
node->child1 = node->child2 = NULL; /* Mark as leaf node. */
node->divfeat = *ind; /* Store index of this vec. */
node->point = points_[*ind];
}
else {
int idx;
int cutfeat;
DistanceType cutval;
meanSplit(ind, count, idx, cutfeat, cutval);
node->divfeat = cutfeat;
node->divval = cutval;
node->child1 = divideTree(ind, idx);
node->child2 = divideTree(ind+idx, count-idx);
}
return node;
}
/**
* Choose which feature to use in order to subdivide this set of vectors.
* Make a random choice among those with the highest variance, and use
* its variance as the threshold value.
*/
void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval)
{
memset(mean_,0,veclen_*sizeof(DistanceType));
memset(var_,0,veclen_*sizeof(DistanceType));
/* Compute mean values. Only the first SAMPLE_MEAN values need to be
sampled to get a good estimate.
*/
int cnt = std::min((int)SAMPLE_MEAN+1, count);
for (int j = 0; j < cnt; ++j) {
ElementType* v = points_[ind[j]];
for (size_t k=0; k<veclen_; ++k) {
mean_[k] += v[k];
}
}
DistanceType div_factor = DistanceType(1)/cnt;
for (size_t k=0; k<veclen_; ++k) {
mean_[k] *= div_factor;
}
/* Compute variances (no need to divide by count). */
for (int j = 0; j < cnt; ++j) {
ElementType* v = points_[ind[j]];
for (size_t k=0; k<veclen_; ++k) {
DistanceType dist = v[k] - mean_[k];
var_[k] += dist * dist;
}
}
/* Select one of the highest variance indices at random. */
cutfeat = selectDivision(var_);
cutval = mean_[cutfeat];
int lim1, lim2;
planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
if (lim1>count/2) index = lim1;
else if (lim2<count/2) index = lim2;
else index = count/2;
/* If either list is empty, it means that all remaining features
* are identical. Split in the middle to maintain a balanced tree.
*/
if ((lim1==count)||(lim2==0)) index = count/2;
}
/**
* Select the top RAND_DIM largest values from v and return the index of
* one of these selected at random.
*/
int selectDivision(DistanceType* v)
{
int num = 0;
size_t topind[RAND_DIM];
/* Create a list of the indices of the top RAND_DIM values. */
for (size_t i = 0; i < veclen_; ++i) {
if ((num < RAND_DIM)||(v[i] > v[topind[num-1]])) {
/* Put this element at end of topind. */
if (num < RAND_DIM) {
topind[num++] = i; /* Add to list. */
}
else {
topind[num-1] = i; /* Replace last element. */
}
/* Bubble end value down to right location by repeated swapping. */
int j = num - 1;
while (j > 0 && v[topind[j]] > v[topind[j-1]]) {
std::swap(topind[j], topind[j-1]);
--j;
}
}
}
/* Select a random integer in range [0,num-1], and return that index. */
int rnd = rand_int(num);
return (int)topind[rnd];
}
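/* Worked example of the partial insertion sort above, with RAND_DIM = 5:
 * scanning variances {2, 9, 4, 7, 1, 8, 3}, topind ends up holding the
 * indices of {9, 8, 7, 4, 3} in decreasing order of variance, and one of
 * those five indices is returned uniformly at random.
 */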
/**
* Subdivide the list of points by a plane perpendicular to the axis of the
* 'cutfeat' dimension, at position 'cutval'.
*
* On return:
* dataset[ind[0..lim1-1]][cutfeat]<cutval
* dataset[ind[lim1..lim2-1]][cutfeat]==cutval
* dataset[ind[lim2..count-1]][cutfeat]>cutval
*/
void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
{
/* Move vector indices for left subtree to front of list. */
int left = 0;
int right = count-1;
for (;; ) {
while (left<=right && points_[ind[left]][cutfeat]<cutval) ++left;
while (left<=right && points_[ind[right]][cutfeat]>=cutval) --right;
if (left>right) break;
std::swap(ind[left], ind[right]); ++left; --right;
}
lim1 = left;
right = count-1;
for (;; ) {
while (left<=right && points_[ind[left]][cutfeat]<=cutval) ++left;
while (left<=right && points_[ind[right]][cutfeat]>cutval) --right;
if (left>right) break;
std::swap(ind[left], ind[right]); ++left; --right;
}
lim2 = left;
}
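/* Worked example of the invariant above: partitioning the values
 * {3, 7, 5, 5, 9, 5} on cutval = 5 reorders the indices so that values < 5
 * occupy [0, lim1), values == 5 occupy [lim1, lim2) and values > 5 occupy
 * [lim2, count), giving lim1 = 1 and lim2 = 4.
 */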
/**
* Performs an exact nearest neighbor search. The exact search performs a full
* traversal of the tree.
*/
template<bool with_removed>
void getExactNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, float epsError) const
{
// checkID -= 1; /* Set a different unique ID for each search. */
if (trees_ > 1) {
fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search");
}
if (trees_>0) {
searchLevelExact<with_removed>(result, vec, tree_roots_[0], 0.0, epsError);
}
}
/**
* Performs the approximate nearest-neighbor search. The search is approximate
* because the tree traversal is abandoned after a given number of descends in
* the tree.
*/
template<bool with_removed>
void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError) const
{
int i;
BranchSt branch;
int checkCount = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
DynamicBitset checked(size_);
/* Search once through each tree down to root. */
for (i = 0; i < trees_; ++i) {
searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
}
/* Keep searching other branches from heap until finished. */
while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
}
delete heap;
}
#ifdef FLANN_KDTREE_MEM_OPT
/**
* Performs the approximate nearest-neighbor search. The search is approximate
* because the tree traversal is abandoned after a given number of descends in
* the tree.
*/
template<bool with_removed>
void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError, Heap<BranchSt>* heap) const
{
int i;
BranchSt branch;
int checkCount = 0;
DynamicBitset checked(size_);
heap->clear();
/* Search once through each tree down to root. */
for (i = 0; i < trees_; ++i) {
searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
}
/* Keep searching other branches from heap until finished. */
while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
}
}
#endif
/**
* Search starting from a given node of the tree. Based on any mismatches at
* higher levels, all exemplars below this level must have a distance of
* at least "mindistsq".
*/
template<bool with_removed>
void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck,
float epsError, Heap<BranchSt>* heap, DynamicBitset& checked) const
{
if (result_set.worstDist()<mindist) {
// printf("Ignoring branch, too far\n");
return;
}
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL)&&(node->child2 == NULL)) {
int index = node->divfeat;
if (with_removed) {
if (removed_points_.test(index)) return;
}
/* Do not check same node more than once when searching multiple trees. */
if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return;
checked.set(index);
checkCount++;
DistanceType dist = distance_(node->point, vec, veclen_);
result_set.addPoint(dist,index);
return;
}
/* Which child branch should be taken first? */
ElementType val = vec[node->divfeat];
DistanceType diff = val - node->divval;
NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
/* Create a branch record for the branch not taken. Add distance
of this feature boundary (we don't attempt to correct for any
use of this feature in a parent node, which is unlikely to
happen and would have only a small effect). Don't bother
adding more branches to heap after halfway point, as cost of
adding exceeds their value.
*/
DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
// if (2 * checkCount < maxCheck || !result.full()) {
if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) {
heap->insert( BranchSt(otherChild, new_distsq) );
}
/* Call recursively to search next level down. */
searchLevel<with_removed>(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked);
}
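/* Pruning sketch (illustrative numbers): with epsError = 0.8, a branch whose
 * lower bound is new_distsq = 1.0 is queued only while worstDist > 0.8 or
 * the result set is not yet full; smaller epsError values prune more
 * branches, which is one source of the approximation besides maxCheck.
 */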
/**
* Performs an exact search in the tree starting from a node.
*/
template<bool with_removed>
void searchLevelExact(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) const
{
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL)&&(node->child2 == NULL)) {
int index = node->divfeat;
if (with_removed) {
if (removed_points_.test(index)) return; // ignore removed points
}
DistanceType dist = distance_(node->point, vec, veclen_);
result_set.addPoint(dist,index);
return;
}
/* Which child branch should be taken first? */
ElementType val = vec[node->divfeat];
DistanceType diff = val - node->divval;
NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
/* Create a branch record for the branch not taken. Add distance
of this feature boundary (we don't attempt to correct for any
use of this feature in a parent node, which is unlikely to
happen and would have only a small effect). Don't bother
adding more branches to heap after halfway point, as cost of
adding exceeds their value.
*/
DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
/* Call recursively to search next level down. */
searchLevelExact<with_removed>(result_set, vec, bestChild, mindist, epsError);
if (mindist*epsError<=result_set.worstDist()) {
searchLevelExact<with_removed>(result_set, vec, otherChild, new_distsq, epsError);
}
}
void addPointToTree(NodePtr node, int ind)
{
ElementType* point = points_[ind];
if ((node->child1==NULL) && (node->child2==NULL)) {
ElementType* leaf_point = node->point;
ElementType max_span = 0;
size_t div_feat = 0;
for (size_t i=0;i<veclen_;++i) {
ElementType span = std::abs(point[i]-leaf_point[i]);
if (span > max_span) {
max_span = span;
div_feat = i;
}
}
NodePtr left = new(pool_) Node();
left->child1 = left->child2 = NULL;
NodePtr right = new(pool_) Node();
right->child1 = right->child2 = NULL;
if (point[div_feat]<leaf_point[div_feat]) {
left->divfeat = ind;
left->point = point;
right->divfeat = node->divfeat;
right->point = node->point;
}
else {
left->divfeat = node->divfeat;
left->point = node->point;
right->divfeat = ind;
right->point = point;
}
node->divfeat = div_feat;
node->divval = (point[div_feat]+leaf_point[div_feat])/2;
node->child1 = left;
node->child2 = right;
}
else {
if (point[node->divfeat]<node->divval) {
addPointToTree(node->child1,ind);
}
else {
addPointToTree(node->child2,ind);
}
}
}
private:
void swap(KDTreeIndex& other)
{
BaseClass::swap(other);
std::swap(trees_, other.trees_);
std::swap(tree_roots_, other.tree_roots_);
std::swap(pool_, other.pool_);
}
private:
enum
{
/**
* To improve efficiency, only SAMPLE_MEAN random values are used to
* compute the mean and variance at each level when building a tree.
* A value of 100 seems to perform as well as using all values.
*/
SAMPLE_MEAN = 100,
/**
* Top random dimensions to consider
*
* When creating random trees, the dimension on which to subdivide is
* selected at random from among the top RAND_DIM dimensions with the
* highest variance. A value of 5 works well.
*/
RAND_DIM=5
};
/**
* Number of randomized trees that are used
*/
int trees_;
DistanceType* mean_;
DistanceType* var_;
/**
* Array of k-d trees used to find neighbours.
*/
std::vector<NodePtr> tree_roots_;
/**
* Pooled memory allocator.
*
* Using a pooled memory allocator is more efficient
* than allocating memory directly when there is a large
* number of small memory allocations.
*/
PooledAllocator pool_;
USING_BASECLASS_SYMBOLS
}; // class KDTreeIndex
}
#endif //FLANN_KDTREE_INDEX_H_
|
GB_reduce_build_template.c | //------------------------------------------------------------------------------
// GB_build_template: T=build(S), and assemble any duplicate tuples
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// This template is used in GB_builder and the Generated/GB_red_build__*
// workers. This is the same for both vectors and matrices, since this step is
// agnostic about which vectors the entries appear.
{
// k unused for some uses of this template
#include "GB_unused.h"
if (ndupl == 0)
{
//----------------------------------------------------------------------
// no duplicates, just permute S into Tx
//----------------------------------------------------------------------
// If no duplicates are present, then GB_builder has already
// transplanted I_work into T->i, so this step does not need to
// construct T->i. The tuple values, in S, are copied or permuted into
// T->x.
if (K_work == NULL)
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
for (int64_t t = tstart ; t < tend ; t++)
{
// Tx [t] = (ttype) S [t] ; with typecast
GB_CAST_ARRAY_TO_ARRAY (Tx, t, S, t) ;
}
}
}
else
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
for (int64_t t = tstart ; t < tend ; t++)
{
// Tx [t] = (ttype) S [K_work [t]] ; with typecast
GB_CAST_ARRAY_TO_ARRAY (Tx, t, S, K_work [t]) ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// assemble duplicates
//----------------------------------------------------------------------
// Entries in S must be copied into T->x, with any duplicates summed
// via the operator. T->i must also be constructed.
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnz = tnz_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
// find the first unique tuple owned by this slice
int64_t t ;
for (t = tstart ; t < tend ; t++)
{
// get the tuple and break if it is not a duplicate
if (I_work [t] >= 0) break ;
}
// scan all tuples and assemble any duplicates
for ( ; t < tend ; t++)
{
// get the t-th tuple, a unique tuple
int64_t i = I_work [t] ;
int64_t k = (K_work == NULL) ? t : K_work [t] ;
ASSERT (i >= 0) ;
// Tx [my_tnz] = S [k] ; with typecast
GB_CAST_ARRAY_TO_ARRAY (Tx, my_tnz, S, k) ;
Ti [my_tnz] = i ;
// assemble all duplicates that follow it. This may assemble
// the first duplicates in the next slice(s) (up to but not
// including the first unique tuple in the subsequent slice(s)).
for ( ; t+1 < nvals && I_work [t+1] < 0 ; t++)
{
// assemble the duplicate tuple
int64_t k = (K_work == NULL) ? (t+1) : K_work [t+1] ;
// Tx [my_tnz] += S [k] with typecast
GB_ADD_CAST_ARRAY_TO_ARRAY (Tx, my_tnz, S, k) ;
}
my_tnz++ ;
}
}
}
}
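/* Serial sketch of the duplicate-assembly pass above (hypothetical scalar
   types, "+" standing in for the monoid operator; the K_work indirection is
   omitted). I_work holds the row index of each unique tuple and a negative
   value for each duplicate that follows its unique predecessor:

    int64_t tnz = 0 ;
    for (int64_t t = 0 ; t < nvals ; t++)
    {
        if (I_work [t] < 0) continue ;           // consumed as a duplicate below
        int64_t i = I_work [t] ;
        double sum = S [t] ;                     // first (unique) tuple
        while (t+1 < nvals && I_work [t+1] < 0)  // fold in trailing duplicates
        {
            sum += S [++t] ;
        }
        Tx [tnz] = sum ;
        Ti [tnz] = i ;
        tnz++ ;
    }
*/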
|
simplest.c | // Minimal OpenMP test kernel: every thread takes the (always-true) branch and
// atomically writes x; the dead else branch shows the matching atomic read.
int main() {
int x;
#pragma omp parallel
{
int p;
if (1) {
#pragma omp atomic write
x = 0;
} else {
#pragma omp atomic read
p = x;
}
#pragma omp barrier
x; // plain read of x after the barrier (intentional no-op expression)
}
return 0;
}
|
convolution_3x3_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
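// Math note (a sketch of what the two loops above compute): for each 3x3
// kernel slice g, tmp = G * g^T with G the 8x3 matrix ktm, and the second
// loop multiplies by G^T again, so kernel_tm0[j * 8 + i] holds U[i][j],
// where U = G * g * G^T; that is, the transformed kernel is stored
// transposed, matching the "transposed" comment above.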
// interleave
// src = 64-inch-outch
// dst = 8b-8a-inch/8a-64-outch/8b
kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)2u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 64; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = k0.row(p + i);
const float* k10 = k1.row(p + i);
const float* k20 = k2.row(p + i);
const float* k30 = k3.row(p + i);
const float* k40 = k4.row(p + i);
const float* k50 = k5.row(p + i);
const float* k60 = k6.row(p + i);
const float* k70 = k7.row(p + i);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00 += 8;
}
}
}
}
}
static void conv3x3s1_winograd63_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
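// Example of the "pad to 6n+2" rule: a 10x10 output rounds up to
// outw = outh = 12, so the bordered input becomes 14x14; the 8x8 input tiles
// then advance with stride 6 (12/6 = 2 tiles per dimension) and exactly
// cover the 14 pixels (columns 0..7 and 6..13).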
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);
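// Row-index sketch for the bucketed layout above: tile i lands in row
// i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2 of its channel, the
// same expression used by the loops below. For example, with tiles = 23,
// indices 0..11 share row 0 (a 12-wide block), 12..19 row 1 (8-wide),
// 20..21 row 2 (2-wide) and 22 row 3 (single).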
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
__fp16* tm2p = tm2.row<__fp16>(i / 12);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 12x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
"ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
"sub %0, %0, #128 \n"
"uzp1 v20.8h, v0.8h, v4.8h \n" // 0
"uzp1 v21.8h, v16.8h, v1.8h \n" // 1
"uzp1 v22.8h, v5.8h, v17.8h \n" // 2
"uzp1 v23.8h, v2.8h, v6.8h \n" // 3
"uzp1 v24.8h, v18.8h, v3.8h \n" // 4
"uzp1 v25.8h, v7.8h, v19.8h \n" // 5
"uzp2 v26.8h, v0.8h, v4.8h \n" // 6
"uzp2 v27.8h, v16.8h, v1.8h \n" // 7
"uzp2 v28.8h, v5.8h, v17.8h \n" // 8
"uzp2 v29.8h, v2.8h, v6.8h \n" // 9
"uzp2 v30.8h, v18.8h, v3.8h \n" // 10
"uzp2 v31.8h, v7.8h, v19.8h \n" // 11
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 7 < tiles; i += 8)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 1 < tiles; i += 2)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123
"fmla v20.8h, v12.8h, v0.h[0] \n"
"fmla v21.8h, v12.8h, v0.h[1] \n"
"fmla v22.8h, v12.8h, v0.h[2] \n"
"fmla v23.8h, v12.8h, v0.h[3] \n"
"fmla v24.8h, v12.8h, v0.h[4] \n"
"fmla v25.8h, v12.8h, v0.h[5] \n"
"fmla v26.8h, v12.8h, v0.h[6] \n"
"fmla v27.8h, v12.8h, v0.h[7] \n"
"fmla v28.8h, v12.8h, v1.h[0] \n"
"fmla v29.8h, v12.8h, v1.h[1] \n"
"fmla v30.8h, v12.8h, v1.h[2] \n"
"fmla v31.8h, v12.8h, v1.h[3] \n"
"fmla v20.8h, v13.8h, v1.h[4] \n"
"fmla v21.8h, v13.8h, v1.h[5] \n"
"fmla v22.8h, v13.8h, v1.h[6] \n"
"fmla v23.8h, v13.8h, v1.h[7] \n"
"fmla v24.8h, v13.8h, v2.h[0] \n"
"fmla v25.8h, v13.8h, v2.h[1] \n"
"fmla v26.8h, v13.8h, v2.h[2] \n"
"fmla v27.8h, v13.8h, v2.h[3] \n"
"fmla v28.8h, v13.8h, v2.h[4] \n"
"fmla v29.8h, v13.8h, v2.h[5] \n"
"fmla v30.8h, v13.8h, v2.h[6] \n"
"fmla v31.8h, v13.8h, v2.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v20.8h, v14.8h, v3.h[0] \n"
"fmla v21.8h, v14.8h, v3.h[1] \n"
"fmla v22.8h, v14.8h, v3.h[2] \n"
"fmla v23.8h, v14.8h, v3.h[3] \n"
"fmla v24.8h, v14.8h, v3.h[4] \n"
"fmla v25.8h, v14.8h, v3.h[5] \n"
"fmla v26.8h, v14.8h, v3.h[6] \n"
"fmla v27.8h, v14.8h, v3.h[7] \n"
"fmla v28.8h, v14.8h, v4.h[0] \n"
"fmla v29.8h, v14.8h, v4.h[1] \n"
"fmla v30.8h, v14.8h, v4.h[2] \n"
"fmla v31.8h, v14.8h, v4.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567
"fmla v20.8h, v15.8h, v4.h[4] \n"
"fmla v21.8h, v15.8h, v4.h[5] \n"
"fmla v22.8h, v15.8h, v4.h[6] \n"
"fmla v23.8h, v15.8h, v4.h[7] \n"
"fmla v24.8h, v15.8h, v5.h[0] \n"
"fmla v25.8h, v15.8h, v5.h[1] \n"
"fmla v26.8h, v15.8h, v5.h[2] \n"
"fmla v27.8h, v15.8h, v5.h[3] \n"
"fmla v28.8h, v15.8h, v5.h[4] \n"
"fmla v29.8h, v15.8h, v5.h[5] \n"
"fmla v30.8h, v15.8h, v5.h[6] \n"
"fmla v31.8h, v15.8h, v5.h[7] \n"
"fmla v20.8h, v16.8h, v6.h[0] \n"
"fmla v21.8h, v16.8h, v6.h[1] \n"
"fmla v22.8h, v16.8h, v6.h[2] \n"
"fmla v23.8h, v16.8h, v6.h[3] \n"
"fmla v24.8h, v16.8h, v6.h[4] \n"
"fmla v25.8h, v16.8h, v6.h[5] \n"
"fmla v26.8h, v16.8h, v6.h[6] \n"
"fmla v27.8h, v16.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v7.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[1] \n"
"fmla v30.8h, v16.8h, v7.h[2] \n"
"fmla v31.8h, v16.8h, v7.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011
"fmla v20.8h, v17.8h, v7.h[4] \n"
"fmla v21.8h, v17.8h, v7.h[5] \n"
"fmla v22.8h, v17.8h, v7.h[6] \n"
"fmla v23.8h, v17.8h, v7.h[7] \n"
"fmla v24.8h, v17.8h, v8.h[0] \n"
"fmla v25.8h, v17.8h, v8.h[1] \n"
"fmla v26.8h, v17.8h, v8.h[2] \n"
"fmla v27.8h, v17.8h, v8.h[3] \n"
"fmla v28.8h, v17.8h, v8.h[4] \n"
"fmla v29.8h, v17.8h, v8.h[5] \n"
"fmla v30.8h, v17.8h, v8.h[6] \n"
"fmla v31.8h, v17.8h, v8.h[7] \n"
"fmla v20.8h, v18.8h, v9.h[0] \n"
"fmla v21.8h, v18.8h, v9.h[1] \n"
"fmla v22.8h, v18.8h, v9.h[2] \n"
"fmla v23.8h, v18.8h, v9.h[3] \n"
"fmla v24.8h, v18.8h, v9.h[4] \n"
"fmla v25.8h, v18.8h, v9.h[5] \n"
"fmla v26.8h, v18.8h, v9.h[6] \n"
"fmla v27.8h, v18.8h, v9.h[7] \n"
"fmla v28.8h, v18.8h, v10.h[0] \n"
"fmla v29.8h, v18.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v10.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.8h, v19.8h, v10.h[4] \n"
"fmla v21.8h, v19.8h, v10.h[5] \n"
"fmla v22.8h, v19.8h, v10.h[6] \n"
"fmla v23.8h, v19.8h, v10.h[7] \n"
"fmla v24.8h, v19.8h, v11.h[0] \n"
"fmla v25.8h, v19.8h, v11.h[1] \n"
"fmla v26.8h, v19.8h, v11.h[2] \n"
"fmla v27.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v19.8h, v11.h[4] \n"
"fmla v29.8h, v19.8h, v11.h[5] \n"
"fmla v30.8h, v19.8h, v11.h[6] \n"
"fmla v31.8h, v19.8h, v11.h[7] \n"
"bne 0b \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v0.h[1] \n"
"fmla v18.8h, v8.8h, v0.h[2] \n"
"fmla v19.8h, v8.8h, v0.h[3] \n"
"fmla v20.8h, v8.8h, v0.h[4] \n"
"fmla v21.8h, v8.8h, v0.h[5] \n"
"fmla v22.8h, v8.8h, v0.h[6] \n"
"fmla v23.8h, v8.8h, v0.h[7] \n"
"fmla v16.8h, v9.8h, v1.h[0] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v1.h[2] \n"
"fmla v19.8h, v9.8h, v1.h[3] \n"
"fmla v20.8h, v9.8h, v1.h[4] \n"
"fmla v21.8h, v9.8h, v1.h[5] \n"
"fmla v22.8h, v9.8h, v1.h[6] \n"
"fmla v23.8h, v9.8h, v1.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v16.8h, v10.8h, v2.h[0] \n"
"fmla v17.8h, v10.8h, v2.h[1] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v2.h[3] \n"
"fmla v20.8h, v10.8h, v2.h[4] \n"
"fmla v21.8h, v10.8h, v2.h[5] \n"
"fmla v22.8h, v10.8h, v2.h[6] \n"
"fmla v23.8h, v10.8h, v2.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v11.8h, v3.h[0] \n"
"fmla v17.8h, v11.8h, v3.h[1] \n"
"fmla v18.8h, v11.8h, v3.h[2] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v20.8h, v11.8h, v3.h[4] \n"
"fmla v21.8h, v11.8h, v3.h[5] \n"
"fmla v22.8h, v11.8h, v3.h[6] \n"
"fmla v23.8h, v11.8h, v3.h[7] \n"
"fmla v16.8h, v12.8h, v4.h[0] \n"
"fmla v17.8h, v12.8h, v4.h[1] \n"
"fmla v18.8h, v12.8h, v4.h[2] \n"
"fmla v19.8h, v12.8h, v4.h[3] \n"
"fmla v20.8h, v12.8h, v4.h[4] \n"
"fmla v21.8h, v12.8h, v4.h[5] \n"
"fmla v22.8h, v12.8h, v4.h[6] \n"
"fmla v23.8h, v12.8h, v4.h[7] \n"
"fmla v16.8h, v13.8h, v5.h[0] \n"
"fmla v17.8h, v13.8h, v5.h[1] \n"
"fmla v18.8h, v13.8h, v5.h[2] \n"
"fmla v19.8h, v13.8h, v5.h[3] \n"
"fmla v20.8h, v13.8h, v5.h[4] \n"
"fmla v21.8h, v13.8h, v5.h[5] \n"
"fmla v22.8h, v13.8h, v5.h[6] \n"
"fmla v23.8h, v13.8h, v5.h[7] \n"
"fmla v16.8h, v14.8h, v6.h[0] \n"
"fmla v17.8h, v14.8h, v6.h[1] \n"
"fmla v18.8h, v14.8h, v6.h[2] \n"
"fmla v19.8h, v14.8h, v6.h[3] \n"
"fmla v20.8h, v14.8h, v6.h[4] \n"
"fmla v21.8h, v14.8h, v6.h[5] \n"
"fmla v22.8h, v14.8h, v6.h[6] \n"
"fmla v23.8h, v14.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v7.h[0] \n"
"fmla v17.8h, v15.8h, v7.h[1] \n"
"fmla v18.8h, v15.8h, v7.h[2] \n"
"fmla v19.8h, v15.8h, v7.h[3] \n"
"fmla v20.8h, v15.8h, v7.h[4] \n"
"fmla v21.8h, v15.8h, v7.h[5] \n"
"fmla v22.8h, v15.8h, v7.h[6] \n"
"fmla v23.8h, v15.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v18.8h, v8.8h, v2.h[0] \n"
"fmla v19.8h, v8.8h, v3.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v2.h[1] \n"
"fmla v19.8h, v9.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v3.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v18.8h, v11.8h, v2.h[3] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v18.8h, v12.8h, v2.h[4] \n"
"fmla v19.8h, v12.8h, v3.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v18.8h, v13.8h, v2.h[5] \n"
"fmla v19.8h, v13.8h, v3.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"fmla v18.8h, v14.8h, v2.h[6] \n"
"fmla v19.8h, v14.8h, v3.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"fmla v18.8h, v15.8h, v2.h[7] \n"
"fmla v19.8h, v15.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i + 1 < tiles; i += 2)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8h}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"bne 0b \n"
"st1 {v16.8h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_pack8_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 8b-8a-inch/8a-36-outch/8b
kernel_tm_pack8.create(inch / 8, 36, outch / 8, (size_t)2u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 36; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = k0.row(p + i);
const float* k10 = k1.row(p + i);
const float* k20 = k2.row(p + i);
const float* k30 = k3.row(p + i);
const float* k40 = k4.row(p + i);
const float* k50 = k5.row(p + i);
const float* k60 = k6.row(p + i);
const float* k70 = k7.row(p + i);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00 += 8;
}
}
}
}
}
static void conv3x3s1_winograd43_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_pack8_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
__fp16* tm2p = tm2.row<__fp16>(i / 12);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 12x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
"ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
"sub %0, %0, #128 \n"
"uzp1 v20.8h, v0.8h, v4.8h \n" // 0
"uzp1 v21.8h, v16.8h, v1.8h \n" // 1
"uzp1 v22.8h, v5.8h, v17.8h \n" // 2
"uzp1 v23.8h, v2.8h, v6.8h \n" // 3
"uzp1 v24.8h, v18.8h, v3.8h \n" // 4
"uzp1 v25.8h, v7.8h, v19.8h \n" // 5
"uzp2 v26.8h, v0.8h, v4.8h \n" // 6
"uzp2 v27.8h, v16.8h, v1.8h \n" // 7
"uzp2 v28.8h, v5.8h, v17.8h \n" // 8
"uzp2 v29.8h, v2.8h, v6.8h \n" // 9
"uzp2 v30.8h, v18.8h, v3.8h \n" // 10
"uzp2 v31.8h, v7.8h, v19.8h \n" // 11
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 7 < tiles; i += 8)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 1 < tiles; i += 2)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0", "v1");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
__fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tmpptr) // %1
: "0"(r0),
"1"(tmpptr)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123
"fmla v20.8h, v12.8h, v0.h[0] \n"
"fmla v21.8h, v12.8h, v0.h[1] \n"
"fmla v22.8h, v12.8h, v0.h[2] \n"
"fmla v23.8h, v12.8h, v0.h[3] \n"
"fmla v24.8h, v12.8h, v0.h[4] \n"
"fmla v25.8h, v12.8h, v0.h[5] \n"
"fmla v26.8h, v12.8h, v0.h[6] \n"
"fmla v27.8h, v12.8h, v0.h[7] \n"
"fmla v28.8h, v12.8h, v1.h[0] \n"
"fmla v29.8h, v12.8h, v1.h[1] \n"
"fmla v30.8h, v12.8h, v1.h[2] \n"
"fmla v31.8h, v12.8h, v1.h[3] \n"
"fmla v20.8h, v13.8h, v1.h[4] \n"
"fmla v21.8h, v13.8h, v1.h[5] \n"
"fmla v22.8h, v13.8h, v1.h[6] \n"
"fmla v23.8h, v13.8h, v1.h[7] \n"
"fmla v24.8h, v13.8h, v2.h[0] \n"
"fmla v25.8h, v13.8h, v2.h[1] \n"
"fmla v26.8h, v13.8h, v2.h[2] \n"
"fmla v27.8h, v13.8h, v2.h[3] \n"
"fmla v28.8h, v13.8h, v2.h[4] \n"
"fmla v29.8h, v13.8h, v2.h[5] \n"
"fmla v30.8h, v13.8h, v2.h[6] \n"
"fmla v31.8h, v13.8h, v2.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v20.8h, v14.8h, v3.h[0] \n"
"fmla v21.8h, v14.8h, v3.h[1] \n"
"fmla v22.8h, v14.8h, v3.h[2] \n"
"fmla v23.8h, v14.8h, v3.h[3] \n"
"fmla v24.8h, v14.8h, v3.h[4] \n"
"fmla v25.8h, v14.8h, v3.h[5] \n"
"fmla v26.8h, v14.8h, v3.h[6] \n"
"fmla v27.8h, v14.8h, v3.h[7] \n"
"fmla v28.8h, v14.8h, v4.h[0] \n"
"fmla v29.8h, v14.8h, v4.h[1] \n"
"fmla v30.8h, v14.8h, v4.h[2] \n"
"fmla v31.8h, v14.8h, v4.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567
"fmla v20.8h, v15.8h, v4.h[4] \n"
"fmla v21.8h, v15.8h, v4.h[5] \n"
"fmla v22.8h, v15.8h, v4.h[6] \n"
"fmla v23.8h, v15.8h, v4.h[7] \n"
"fmla v24.8h, v15.8h, v5.h[0] \n"
"fmla v25.8h, v15.8h, v5.h[1] \n"
"fmla v26.8h, v15.8h, v5.h[2] \n"
"fmla v27.8h, v15.8h, v5.h[3] \n"
"fmla v28.8h, v15.8h, v5.h[4] \n"
"fmla v29.8h, v15.8h, v5.h[5] \n"
"fmla v30.8h, v15.8h, v5.h[6] \n"
"fmla v31.8h, v15.8h, v5.h[7] \n"
"fmla v20.8h, v16.8h, v6.h[0] \n"
"fmla v21.8h, v16.8h, v6.h[1] \n"
"fmla v22.8h, v16.8h, v6.h[2] \n"
"fmla v23.8h, v16.8h, v6.h[3] \n"
"fmla v24.8h, v16.8h, v6.h[4] \n"
"fmla v25.8h, v16.8h, v6.h[5] \n"
"fmla v26.8h, v16.8h, v6.h[6] \n"
"fmla v27.8h, v16.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v7.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[1] \n"
"fmla v30.8h, v16.8h, v7.h[2] \n"
"fmla v31.8h, v16.8h, v7.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011
"fmla v20.8h, v17.8h, v7.h[4] \n"
"fmla v21.8h, v17.8h, v7.h[5] \n"
"fmla v22.8h, v17.8h, v7.h[6] \n"
"fmla v23.8h, v17.8h, v7.h[7] \n"
"fmla v24.8h, v17.8h, v8.h[0] \n"
"fmla v25.8h, v17.8h, v8.h[1] \n"
"fmla v26.8h, v17.8h, v8.h[2] \n"
"fmla v27.8h, v17.8h, v8.h[3] \n"
"fmla v28.8h, v17.8h, v8.h[4] \n"
"fmla v29.8h, v17.8h, v8.h[5] \n"
"fmla v30.8h, v17.8h, v8.h[6] \n"
"fmla v31.8h, v17.8h, v8.h[7] \n"
"fmla v20.8h, v18.8h, v9.h[0] \n"
"fmla v21.8h, v18.8h, v9.h[1] \n"
"fmla v22.8h, v18.8h, v9.h[2] \n"
"fmla v23.8h, v18.8h, v9.h[3] \n"
"fmla v24.8h, v18.8h, v9.h[4] \n"
"fmla v25.8h, v18.8h, v9.h[5] \n"
"fmla v26.8h, v18.8h, v9.h[6] \n"
"fmla v27.8h, v18.8h, v9.h[7] \n"
"fmla v28.8h, v18.8h, v10.h[0] \n"
"fmla v29.8h, v18.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v10.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.8h, v19.8h, v10.h[4] \n"
"fmla v21.8h, v19.8h, v10.h[5] \n"
"fmla v22.8h, v19.8h, v10.h[6] \n"
"fmla v23.8h, v19.8h, v10.h[7] \n"
"fmla v24.8h, v19.8h, v11.h[0] \n"
"fmla v25.8h, v19.8h, v11.h[1] \n"
"fmla v26.8h, v19.8h, v11.h[2] \n"
"fmla v27.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v19.8h, v11.h[4] \n"
"fmla v29.8h, v19.8h, v11.h[5] \n"
"fmla v30.8h, v19.8h, v11.h[6] \n"
"fmla v31.8h, v19.8h, v11.h[7] \n"
"bne 0b \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v0.h[1] \n"
"fmla v18.8h, v8.8h, v0.h[2] \n"
"fmla v19.8h, v8.8h, v0.h[3] \n"
"fmla v20.8h, v8.8h, v0.h[4] \n"
"fmla v21.8h, v8.8h, v0.h[5] \n"
"fmla v22.8h, v8.8h, v0.h[6] \n"
"fmla v23.8h, v8.8h, v0.h[7] \n"
"fmla v16.8h, v9.8h, v1.h[0] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v1.h[2] \n"
"fmla v19.8h, v9.8h, v1.h[3] \n"
"fmla v20.8h, v9.8h, v1.h[4] \n"
"fmla v21.8h, v9.8h, v1.h[5] \n"
"fmla v22.8h, v9.8h, v1.h[6] \n"
"fmla v23.8h, v9.8h, v1.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v16.8h, v10.8h, v2.h[0] \n"
"fmla v17.8h, v10.8h, v2.h[1] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v2.h[3] \n"
"fmla v20.8h, v10.8h, v2.h[4] \n"
"fmla v21.8h, v10.8h, v2.h[5] \n"
"fmla v22.8h, v10.8h, v2.h[6] \n"
"fmla v23.8h, v10.8h, v2.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v11.8h, v3.h[0] \n"
"fmla v17.8h, v11.8h, v3.h[1] \n"
"fmla v18.8h, v11.8h, v3.h[2] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v20.8h, v11.8h, v3.h[4] \n"
"fmla v21.8h, v11.8h, v3.h[5] \n"
"fmla v22.8h, v11.8h, v3.h[6] \n"
"fmla v23.8h, v11.8h, v3.h[7] \n"
"fmla v16.8h, v12.8h, v4.h[0] \n"
"fmla v17.8h, v12.8h, v4.h[1] \n"
"fmla v18.8h, v12.8h, v4.h[2] \n"
"fmla v19.8h, v12.8h, v4.h[3] \n"
"fmla v20.8h, v12.8h, v4.h[4] \n"
"fmla v21.8h, v12.8h, v4.h[5] \n"
"fmla v22.8h, v12.8h, v4.h[6] \n"
"fmla v23.8h, v12.8h, v4.h[7] \n"
"fmla v16.8h, v13.8h, v5.h[0] \n"
"fmla v17.8h, v13.8h, v5.h[1] \n"
"fmla v18.8h, v13.8h, v5.h[2] \n"
"fmla v19.8h, v13.8h, v5.h[3] \n"
"fmla v20.8h, v13.8h, v5.h[4] \n"
"fmla v21.8h, v13.8h, v5.h[5] \n"
"fmla v22.8h, v13.8h, v5.h[6] \n"
"fmla v23.8h, v13.8h, v5.h[7] \n"
"fmla v16.8h, v14.8h, v6.h[0] \n"
"fmla v17.8h, v14.8h, v6.h[1] \n"
"fmla v18.8h, v14.8h, v6.h[2] \n"
"fmla v19.8h, v14.8h, v6.h[3] \n"
"fmla v20.8h, v14.8h, v6.h[4] \n"
"fmla v21.8h, v14.8h, v6.h[5] \n"
"fmla v22.8h, v14.8h, v6.h[6] \n"
"fmla v23.8h, v14.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v7.h[0] \n"
"fmla v17.8h, v15.8h, v7.h[1] \n"
"fmla v18.8h, v15.8h, v7.h[2] \n"
"fmla v19.8h, v15.8h, v7.h[3] \n"
"fmla v20.8h, v15.8h, v7.h[4] \n"
"fmla v21.8h, v15.8h, v7.h[5] \n"
"fmla v22.8h, v15.8h, v7.h[6] \n"
"fmla v23.8h, v15.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 3 < tiles; i += 4)
{
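                // 4-tile variant: v16-v19 each accumulate one tile; here v0-v3 each
                // hold one whole tile (8 input lanes), so each weight vector is
                // broadcast against one lane of every tile.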
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v18.8h, v8.8h, v2.h[0] \n"
"fmla v19.8h, v8.8h, v3.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v2.h[1] \n"
"fmla v19.8h, v9.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v3.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v18.8h, v11.8h, v2.h[3] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v18.8h, v12.8h, v2.h[4] \n"
"fmla v19.8h, v12.8h, v3.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v18.8h, v13.8h, v2.h[5] \n"
"fmla v19.8h, v13.8h, v3.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"fmla v18.8h, v14.8h, v2.h[6] \n"
"fmla v19.8h, v14.8h, v3.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"fmla v18.8h, v15.8h, v2.h[7] \n"
"fmla v19.8h, v15.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i + 1 < tiles; i += 2)
{
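                // 2-tile variant: v16/v17 accumulate the two tiles, v0/v1 hold one tile each.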
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
for (; i < tiles; i++)
{
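                // Single-tile tail: v16 accumulates one tile x 8 output channels.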
const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* k0 = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8h}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"bne 0b \n"
"st1 {v16.8h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
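    // conv3x3s1_winograd43 is F(4x4, 3x3): each 6x6 transformed tile yields a
    // 4x4 output block, so the transform writes into a bordered buffer whose
    // width/height are rounded up to multiples of 4; the excess is cut below.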
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_pack8_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
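    // Direct 3x3 stride-1 convolution on pack8 fp16 data: every pixel is a
    // group of 8 fp16 lanes and every kernel tap is an 8x8 fp16 matrix,
    // applied in the assembly below as eight broadcast fmla ops. A minimal
    // scalar sketch of the per-pixel accumulation (hypothetical names, for
    // orientation only; the kernel layout inferred from the loads is
    // [tap][input lane][output lane]):
    //
    //   for (int ky = 0; ky < 3; ky++)
    //       for (int kx = 0; kx < 3; kx++)
    //           for (int ic = 0; ic < 8; ic++)
    //               for (int oc = 0; oc < 8; oc++)
    //                   out[j * 8 + oc] += kptr[(ky * 3 + kx) * 64 + ic * 8 + oc]
    //                                      * row[ky][(j + kx) * 8 + ic];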
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0.row<__fp16>(0);
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.8h, v5.8h}, [%1] \n" // r04 r05
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.8h, v13.8h}, [%2] \n" // r14 r15
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v9.h[0] \n"
"fmla v30.8h, v16.8h, v10.h[0] \n"
"fmla v31.8h, v16.8h, v11.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v9.h[1] \n"
"fmla v30.8h, v17.8h, v10.h[1] \n"
"fmla v31.8h, v17.8h, v11.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v9.h[2] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v11.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v9.h[3] \n"
"fmla v30.8h, v19.8h, v10.h[3] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v9.h[4] \n"
"fmla v30.8h, v20.8h, v10.h[4] \n"
"fmla v31.8h, v20.8h, v11.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v21.8h, v10.h[5] \n"
"fmla v31.8h, v21.8h, v11.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v9.h[6] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v22.8h, v11.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v9.h[7] \n"
"fmla v30.8h, v23.8h, v10.h[7] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v11.h[0] \n"
"fmla v31.8h, v16.8h, v12.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v11.h[1] \n"
"fmla v31.8h, v17.8h, v12.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v18.8h, v12.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v11.h[3] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v11.h[4] \n"
"fmla v31.8h, v20.8h, v12.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v11.h[5] \n"
"fmla v31.8h, v21.8h, v12.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v22.8h, v12.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v11.h[7] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v13.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v13.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v13.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v13.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v13.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v13.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v13.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v13.h[7] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.8h, v5.8h}, [%3] \n" // r24 r25
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1] \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v30.8h, v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2] \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v16.8h, v6.h[0] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v5.h[2] \n"
"fmla v29.8h, v18.8h, v6.h[2] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v20.8h, v6.h[4] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v5.h[6] \n"
"fmla v29.8h, v22.8h, v6.h[6] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v6.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[0] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v6.h[2] \n"
"fmla v29.8h, v18.8h, v7.h[2] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3] \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v6.h[4] \n"
"fmla v29.8h, v20.8h, v7.h[4] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v6.h[6] \n"
"fmla v29.8h, v22.8h, v7.h[6] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"add %1, %1, #32 \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"add %1, %1, #16 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"add %2, %2, #16 \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"fadd v30.8h, v30.8h, v31.8h \n"
"add %3, %3, #16 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}
static void conv3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
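    // Stride-2 variant of the direct pack8 convolution above: four outputs
    // consume nine input pixels per row (r00..r08), and after each output row
    // the input pointers jump by tailstep to descend two input rows.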
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = (w - 2 * outw + w) * 8;
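    // During one output row the inner loops advance r0/r1/r2 by 2 * outw pack8
    // pixels; the next output row starts two input rows lower, i.e. 2 * w
    // pixels on, so tailstep = (2 * w - 2 * outw) * 8 halfwords.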
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r04 r05 r06 r07
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1] \n" // r08
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v0.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v0.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v0.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r14 r15 r16 r17
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v14.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v14.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v14.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v14.h[3] \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v14.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v14.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v14.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v14.h[7] \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v13.h[0] \n"
"fmla v31.8h, v16.8h, v15.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v13.h[1] \n"
"fmla v31.8h, v17.8h, v15.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v13.h[2] \n"
"fmla v31.8h, v18.8h, v15.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v13.h[3] \n"
"fmla v31.8h, v19.8h, v15.h[3] \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v13.h[4] \n"
"fmla v31.8h, v20.8h, v15.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v13.h[5] \n"
"fmla v31.8h, v21.8h, v15.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v13.h[6] \n"
"fmla v31.8h, v22.8h, v15.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v13.h[7] \n"
"fmla v31.8h, v23.8h, v15.h[7] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.8h}, [%2] \n" // r18
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v12.h[0] \n"
"fmla v30.8h, v16.8h, v14.h[0] \n"
"fmla v31.8h, v16.8h, v8.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v12.h[1] \n"
"fmla v30.8h, v17.8h, v14.h[1] \n"
"fmla v31.8h, v17.8h, v8.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v12.h[2] \n"
"fmla v30.8h, v18.8h, v14.h[2] \n"
"fmla v31.8h, v18.8h, v8.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v12.h[3] \n"
"fmla v30.8h, v19.8h, v14.h[3] \n"
"fmla v31.8h, v19.8h, v8.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v12.h[4] \n"
"fmla v30.8h, v20.8h, v14.h[4] \n"
"fmla v31.8h, v20.8h, v8.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v21.8h, v14.h[5] \n"
"fmla v31.8h, v21.8h, v8.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v12.h[6] \n"
"fmla v30.8h, v22.8h, v14.h[6] \n"
"fmla v31.8h, v22.8h, v8.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v12.h[7] \n"
"fmla v30.8h, v23.8h, v14.h[7] \n"
"fmla v31.8h, v23.8h, v8.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r24 r25 r26 r27
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3] \n" // r28
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v0.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v0.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v0.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v30.8h, v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1] \n" // r04
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v0.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v0.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v0.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v0.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v6.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v6.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v6.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v6.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[0] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v5.h[2] \n"
"fmla v29.8h, v18.8h, v7.h[2] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v4.8h}, [%2] \n" // r14
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v20.8h, v7.h[4] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v5.h[6] \n"
"fmla v29.8h, v22.8h, v7.h[6] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"fmla v28.8h, v16.8h, v6.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v6.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v6.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v6.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3] \n" // r24
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v0.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v0.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v0.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v0.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"add %1, %1, #32 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"add %2, %2, #32 \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"fadd v30.8h, v30.8h, v31.8h \n"
"add %3, %3, #32 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
}
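/* Note: each "fmla vD.8h, vK.8h, vR.h[lane]" above multiplies the 8 output
channels of one kernel tap by a single broadcast input channel and
accumulates. A minimal C intrinsics sketch of that one pattern (illustration
only, assuming <arm_neon.h> and __ARM_FEATURE_FP16_VECTOR_ARITHMETIC; the
variable names are hypothetical and not part of this kernel):

float16x8_t sum, k, r;
// equivalent of: fmla v28.8h, v16.8h, v0.h[0]
sum = vfmaq_laneq_f16(sum, k, r, 0);
*/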
|
labyrinth.c | /* =============================================================================
*
* labyrinth.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "list.h"
#include "maze.h"
#include "router.h"
#include "thread.h"
#include "timer.h"
#include "types.h"
enum param_types {
PARAM_BENDCOST = (unsigned char)'b',
PARAM_THREAD = (unsigned char)'t',
PARAM_XCOST = (unsigned char)'x',
PARAM_YCOST = (unsigned char)'y',
PARAM_ZCOST = (unsigned char)'z',
};
enum param_defaults {
PARAM_DEFAULT_BENDCOST = 1,
PARAM_DEFAULT_THREAD = 1,
PARAM_DEFAULT_XCOST = 1,
PARAM_DEFAULT_YCOST = 1,
PARAM_DEFAULT_ZCOST = 2,
};
bool_t global_doPrint = FALSE;
char* global_inputFile = NULL;
long global_params[256]; /* 256 = ascii limit */
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
printf("Usage: %s [options]\n", appName);
puts("\nOptions: (defaults)\n");
printf(" b <INT> [b]end cost (%i)\n", PARAM_DEFAULT_BENDCOST);
printf(" i <FILE> [i]nput file name (%s)\n", global_inputFile);
printf(" p [p]rint routed maze (false)\n");
printf(" t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD);
printf(" x <UINT> [x] movement cost (%i)\n", PARAM_DEFAULT_XCOST);
printf(" y <UINT> [y] movement cost (%i)\n", PARAM_DEFAULT_YCOST);
printf(" z <UINT> [z] movement cost (%i)\n", PARAM_DEFAULT_ZCOST);
exit(1);
}
/* =============================================================================
* setDefaultParams
* =============================================================================
*/
static void
setDefaultParams ()
{
global_params[PARAM_BENDCOST] = PARAM_DEFAULT_BENDCOST;
global_params[PARAM_THREAD] = PARAM_DEFAULT_THREAD;
global_params[PARAM_XCOST] = PARAM_DEFAULT_XCOST;
global_params[PARAM_YCOST] = PARAM_DEFAULT_YCOST;
global_params[PARAM_ZCOST] = PARAM_DEFAULT_ZCOST;
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
long i;
long opt;
opterr = 0;
setDefaultParams();
while ((opt = getopt(argc, argv, "b:i:pt:x:y:z:")) != -1) {
switch (opt) {
case 'b':
case 't':
case 'x':
case 'y':
case 'z':
global_params[(unsigned char)opt] = atol(optarg);
break;
case 'i':
global_inputFile = optarg;
break;
case 'p':
global_doPrint = TRUE;
break;
case '?':
default:
opterr++;
break;
}
}
for (i = optind; i < argc; i++) {
fprintf(stderr, "Non-option argument: %s\n", argv[i]);
opterr++;
}
if (opterr) {
displayUsage(argv[0]);
}
}
/* =============================================================================
* main
* =============================================================================
*/
MAIN(argc, argv)
{
GOTO_REAL();
/*
* Initialization
*/
parseArgs(argc, (char** const)argv);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
maze_t* mazePtr = maze_alloc();
assert(mazePtr);
long numPathToRoute = maze_read(mazePtr, global_inputFile);
router_t* routerPtr = router_alloc(global_params[PARAM_XCOST],
global_params[PARAM_YCOST],
global_params[PARAM_ZCOST],
global_params[PARAM_BENDCOST]);
assert(routerPtr);
list_t* pathVectorListPtr = list_alloc(NULL);
assert(pathVectorListPtr);
/*
* Run transactions
*/
router_solve_arg_t routerArg = {routerPtr, mazePtr, pathVectorListPtr};
TIMER_T startTime;
TIMER_READ(startTime);
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
router_solve((void *)&routerArg);
}
#else
thread_start(router_solve, (void*)&routerArg);
#endif
GOTO_REAL();
TIMER_T stopTime;
TIMER_READ(stopTime);
long numPathRouted = 0;
list_iter_t it;
list_iter_reset(&it, pathVectorListPtr);
while (list_iter_hasNext(&it, pathVectorListPtr)) {
vector_t* pathVectorPtr = (vector_t*)list_iter_next(&it, pathVectorListPtr);
numPathRouted += vector_getSize(pathVectorPtr);
}
printf("Paths routed = %li\n", numPathRouted);
printf("\nTime = %lf\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
* Check solution and clean up
*/
assert(numPathRouted <= numPathToRoute);
bool_t status = maze_checkPaths(mazePtr, pathVectorListPtr, global_doPrint);
assert(status == TRUE);
puts("Verification passed.");
maze_free(mazePtr);
router_free(routerPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
GOTO_SIM();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of labyrinth.c
*
* =============================================================================
*/
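/* Example invocation (input file name hypothetical; flags as defined in
* parseArgs above):
*
* ./labyrinth -i maze.txt -t 4 -p
*
* routes the paths of maze.txt with 4 threads and prints the routed maze. */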
|
GB_unaryop__ainv_fp64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_uint64
// op(A') function: GB_tran__ainv_fp64_uint64
// C type: double
// A type: uint64_t
// cast: double cij = (double) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_fp64_uint64
(
double *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_fp64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
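//------------------------------------------------------------------------------
// usage sketch (illustrative, not part of the generated kernel): this code is
// normally reached through the public API. With hypothetical matrices C
// (GrB_FP64) and A (GrB_UINT64), and after GrB_init:
//
// GrB_Matrix_apply (C, NULL, NULL, GrB_AINV_FP64, A, NULL) ;
//
// computes C = -(double) A, dispatching to GB_unop__ainv_fp64_uint64 above.
//------------------------------------------------------------------------------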
|
relu_hcl_arm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: qtang@openailab.com
*/
#ifndef _RELU_KERNEL_ARM_H_
#define _RELU_KERNEL_ARM_H_
#include <arm_neon.h>
#include "../../../../../include/tengine_ir.h"
#include "../../../../op/relu_param.h"
static int perf_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
int num_thread)
{
int batch = input_tensor->dims[0] ? input_tensor->dims[0] : 1;
int channels = input_tensor->dims[1] ? input_tensor->dims[1] : 1;
int h = input_tensor->dims[2] ? input_tensor->dims[2] : 1;
int w = input_tensor->dims[3] ? input_tensor->dims[3] : 1;
int size = h * w;
int c_step = h * w;
int b_step = channels * h * w;
float* input_data = (float*)input_tensor->data;
float* out_data = (float*)output_tensor->data;
if (negative_slope == 0)
{
for (int n = 0; n < batch; n++)
{
float* input = input_data + n * b_step;
float* output = out_data + n * b_step;
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = input + c_step * q;
float* dst = output + c_step * q;
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _zero = vdupq_n_f32(0.f);
for (; nn > 0; nn--)
{
float32x4_t _p = vld1q_f32(src);
_p = vmaxq_f32(_p, _zero);
vst1q_f32(dst, _p);
src += 4;
dst += 4;
}
#endif
for (; remain > 0; remain--)
{
if (src[0] < 0)
dst[0] = 0;
else
dst[0] = src[0];
src++;
dst++;
}
}
}
}
else
{
for (int n = 0; n < batch; n++)
{
float* input = input_data + n * b_step;
float* output = out_data + n * b_step;
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = input + c_step * q;
float* dst = output + c_step * q;
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _zero = vdupq_n_f32(0.f);
float32x4_t _slope = vdupq_n_f32(negative_slope);
for (; nn > 0; nn--)
{
float32x4_t _p = vld1q_f32(src);
uint32x4_t _lemask = vcleq_f32(_p, _zero);
float32x4_t _ps = vmulq_f32(_p, _slope);
_p = vbslq_f32(_lemask, _ps, _p);
vst1q_f32(dst, _p);
src += 4;
dst += 4;
}
#endif
for (; remain > 0; remain--)
{
if (src[0] < 0)
dst[0] = src[0] * negative_slope;
else
dst[0] = src[0];
src++;
dst++;
}
}
}
}
return 0;
}
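/* Usage sketch (illustrative; tensor setup elided): for a plain ReLU pass,
* call with a zero slope; for Leaky ReLU, pass the slope from the op
* parameter, e.g.
*
* perf_relu_fp32(input_tensor, output_tensor, param->negative_slope, num_thread);
*
* where param is the struct relu_param attached to the node. */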
#endif
|
laplace2d.c | /*
* Copyright 2017 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <openacc.h>
#include "timer.h"
#define NN 4096
#define NM 4096
double A[NN][NM];
double Anew[NN][NM];
int main(int argc, char** argv)
{
const int n = NN;
const int m = NM;
const int iter_max = 1000;
const double tol = 1.0e-6;
double error = 1.0;
memset(A, 0, n * m * sizeof(double));
memset(Anew, 0, n * m * sizeof(double));
for (int j = 0; j < n; j++)
{
A[j][0] = 1.0;
Anew[j][0] = 1.0;
}
printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);
StartTimer();
int iter = 0;
while ( error > tol && iter < iter_max )
{
error = 0.0;
#pragma omp parallel for shared(m, n, Anew, A) reduction(max:error)
#pragma acc kernels
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
+ A[j-1][i] + A[j+1][i]);
error = fmax( error, fabs(Anew[j][i] - A[j][i]));
}
}
#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
A[j][i] = Anew[j][i];
}
}
if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
iter++;
}
double runtime = GetTimer();
printf(" total: %f s\n", runtime / 1000);
return 0;
}
|
GB_binop__ne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int8)
// A*D function (colscale): GB (_AxD__ne_int8)
// D*A function (rowscale): GB (_DxB__ne_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int8)
// C=scalar+B GB (_bind1st__ne_int8)
// C=scalar+B' GB (_bind1st_tran__ne_int8)
// C=A+scalar GB (_bind2nd__ne_int8)
// C=A'+scalar GB (_bind2nd_tran__ne_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__ne_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
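//------------------------------------------------------------------------------
// usage sketch (illustrative, not part of the generated kernel): with
// hypothetical int8 matrices A and B, a bool matrix C, and after GrB_init:
//
// GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_NE_INT8, A, B, NULL) ;
//
// computes cij = (aij != bij) on the intersection pattern, dispatching to the
// GB (_AemultB_*__ne_int8) kernels above.
//------------------------------------------------------------------------------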
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size and even
% shape, to an image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology, one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
** The following test is for special floating point numbers of value NaN (not
** a number), that may be used within a Kernel Definition. NaN's are defined
** as part of the IEEE standard for floating point number representation.
**
** These are used as a Kernel value to mean that this kernel position is not
** part of the kernel neighbourhood for convolution or morphology processing,
** and thus should be ignored. This allows the use of 'shaped' kernels.
**
** The special property that two NaN's are never equal, even if they are from
** the same variable, allows you to test if a value is a special NaN value.
**
** The macro IsNan() is thus only true if the value given is NaN.
*/
#define IsNan(a) ((a)!=(a))
/*
Other global definitions used by module.
*/
static inline double MagickMin(const double x,const double y)
{
return( x < y ? x : y);
}
static inline double MagickMax(const double x,const double y)
{
return( x > y ? x : y);
}
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
while (kernel->next != (KernelInfo *) NULL)
kernel = kernel->next;
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin; this is no longer the case: any rectangular kernel may
% be used, with any element declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shape the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
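% For example (illustrative strings only):
%
% "Octagon:3" a named kernel, with argument
% "3x3+1+1: 0,-1,0 -1,4,-1 0,-1,0" a user defined 3x3 kernel
% "0,-1,0,-1,4,-1,0,-1,0" an old style odd-square list
%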
% You can define a 'list of kernels' which can be used by some morphology
% operators. A list is defined as a semi-colon separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating an odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MaxTextExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *)NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification */
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum = +MagickHuge;
kernel->maximum = -MagickHuge;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* do not include this value in kernel */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetMagickToken(p,&p,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling an incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if ( kernel->minimum == MagickHuge )
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string)
{
char
token[MaxTextExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
GetMagickToken(kernel_string,&p,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *)NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel definition */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
KernelInfo
*kernel,
*new_kernel;
char
token[MaxTextExtent];
const char
*p;
size_t
kernel_number;
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p = kernel_string;
kernel = NULL;
kernel_number = 0;
while ( GetMagickToken(p,NULL,token), *token != '\0' ) {
/* ignore extra or multiple ';' kernel separators */
if ( *token != ';' ) {
/* tokens starting with alpha is a Named kernel */
if (isalpha((int) *token) != 0)
new_kernel = ParseKernelName(p);
else /* otherwise a user defined kernel array */
new_kernel = ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if ( new_kernel == (KernelInfo *) NULL ) {
(void) FormatLocaleFile(stderr, "Failed to parse kernel number #%.20g\n",
(double) kernel_number);
if ( kernel != (KernelInfo *) NULL )
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if ( kernel == (KernelInfo *) NULL )
kernel = new_kernel;
else
LastKernelInfo(kernel)->next = new_kernel;
}
/* look for the next kernel in list */
p = strchr(p, ';');
if ( p == (char *) NULL )
break;
p++;
}
return(kernel);
}
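/* Usage sketch (illustrative): parse a kernel string, then free it with
** DestroyKernelInfo() as documented above:
**
** KernelInfo *kernel = AcquireKernelInfo("Gaussian:0x2");
** if (kernel != (KernelInfo *) NULL)
** kernel = DestroyKernelInfo(kernel);
*/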
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% A No-Op or Scaling single-element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (currently restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other is equivalent to a far larger "Gaussian" kernel with the
% same sigma value; however, it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
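% For example, one of these kernels might be applied from the command
% line with the -morphology operator (illustrative usage, hypothetical
% file names):
%
% convert input.png -morphology Convolve 'Gaussian:0x2' output.png
%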
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-normalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process, which uses a set of 9 specially weighted kernels. These 9
% kernels are not normalized, but are directly applied to the image. The
% results are then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and with at least 2 such runs at 90
% degrees from each other, both the direction and the strength of the edge
% can be determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | 1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given; the radius
% may be a floating-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: low-radius Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
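%
% For example, "Rectangle:7x3+3+1" generates a 7x3 kernel of 1's with the
% control point at column 3, row 1 (here, the pixel closest to the center).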
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernels are not good general morphological kernels, but are used
% more for highlighting and marking any single pixels in an image, using
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% These are the 'edge' pixels of the default "Disk" kernel.
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of line ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% Many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information on how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric) is the distance needed when you can only
% travel in horizontal or vertical directions. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond-like distance function, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances match those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get an octagonal-like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape, especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
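%
% For example, with a radius of 1 and a scale of 1, the generated kernel is
%
%        | 1.414, 1.0, 1.414 |
%        | 1.0,   0.0, 1.0   |
%        | 1.414, 1.0, 1.414 |
%
% so only orthogonal and diagonal single steps carry exact distances, and
% longer distances accumulate octagonally from them.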
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
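% For example (a sketch; the argument form follows the syntax above):
%
%        KernelInfo *k = AcquireKernelInfo("Euclidean:4x100");
%        /* distances are now generated in multiples of 100 */
%        k = DestroyKernelInfo(k);
%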
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *)NULL);
break;
case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) AcquireAlignedMemory(1,
sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexican Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigmas (< 0.1) the central value becomes larger than one, thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
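    /* For example (a sketch): AcquireKernelInfo("Gaussian:0x2") reaches the
    ** case above and yields a sampled 2D gaussian of sigma 2.0, which the
    ** CorrelateNormalize scaling above leaves summing to 1.0.
    */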
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
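    /* A sketch of the oversampling below: with KernelRank 3 and, say, a
    ** width of 5, the gaussian is evaluated at 15 points and each group of
    ** 3 samples is summed into one of the 5 output taps, approximating an
    ** integral across each tap rather than a single point sample.
    */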
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigmas (< 0.1) the central value becomes larger than one, thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula;
** it is less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(double));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3] = +MagickSQ2;
kernel->values[5] = kernel->values[7] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) > MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axes to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along the diagonals to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
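% For example (sketch):
%
%        KernelInfo *copy = CloneKernelInfo(kernel);
%        if (copy != (KernelInfo *) NULL)
%          {
%            ScaleKernelInfo(copy, 2.0, NoValue);  /* modify the clone only */
%            copy = DestroyKernelInfo(copy);
%          }
%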
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
register ssize_t
i;
KernelInfo
*new_kernel;
assert(kernel != (KernelInfo *) NULL);
new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (new_kernel == (KernelInfo *) NULL)
return(new_kernel);
*new_kernel=(*kernel); /* copy values in structure */
/* replace the values with a copy of the values */
new_kernel->values=(MagickRealType *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (new_kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(new_kernel));
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
new_kernel->values[i]=kernel->values[i];
/* Also clone the next kernel in the kernel list */
if ( kernel->next != (KernelInfo *) NULL ) {
new_kernel->next = CloneKernelInfo(kernel->next);
if ( new_kernel->next == (KernelInfo *) NULL )
return(DestroyKernelInfo(new_kernel));
}
return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
if ( kernel->next != (KernelInfo *) NULL )
kernel->next=DestroyKernelInfo(kernel->next);
kernel->values=(MagickRealType *)RelinquishAlignedMemory(kernel->values);
kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register MagickRealType
*k,t;
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x < (ssize_t) kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
kernel->x = kernel->width - kernel->x - 1;
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
KernelInfo
*clone,
*last;
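  /* The list built below is: original, 180-degree rotation, then the
  ** cumulative 270- and 90-degree rotations; for the 3x3 hit-and-miss
  ** kernels this is used with, these act as the flip, transpose and flop
  ** reflections noted on each step.
  */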
last = kernel;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 180); /* flip */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 90); /* transpose */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 180); /* flop */
LastKernelInfo(last)->next = clone;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels,
% while 90 degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
register size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
/* Test for Nan equivalence */
if ( IsNan(kernel1->values[i]) && !IsNan(kernel2->values[i]) )
return MagickFalse;
if ( IsNan(kernel2->values[i]) && !IsNan(kernel1->values[i]) )
return MagickFalse;
/* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) > MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
KernelInfo
*clone,
*last;
last = kernel;
while(1) {
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, angle);
if ( SameKernelInfo(kernel, clone) == MagickTrue )
break;
LastKernelInfo(last)->next = clone;
last = clone;
}
clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
register size_t
i;
kernel->minimum = kernel->maximum = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; i < (kernel->width*kernel->height); i++)
{
if ( fabs(kernel->values[i]) < MagickEpsilon )
kernel->values[i] = 0.0;
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels.
%
% It is basically equivalent to MorphologyImageChannel() (see below) but
% without any user controls. This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImageChannel()'s task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically kernels are not normalized/scaled/blended by the
% 'convolve:scale' Image Artifact (setting), nor is the convolve bias
% (-bias setting or image->bias) looked at, but must be supplied from the
% function arguments.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
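% For example (a sketch of a single erosion over the default channels):
%
%        Image *eroded = MorphologyApply(image, ErodeMorphology,
%          DefaultChannels, 1, kernel, UndefinedCompositeOp, 0.0, exception);
%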
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*p_view,
*q_view;
ssize_t
y, offx, offy;
size_t
virt_width,
changed;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(result_image != (Image *) NULL);
assert(result_image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
status=MagickTrue;
changed=0;
progress=0;
p_view=AcquireCacheView(image);
q_view=AcquireCacheView(result_image);
virt_width=image->columns+kernel->width-1;
/* Some methods (including convolve) need to use a reflected kernel.
* Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
/*case DistanceMorphology:*/
/* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* kernel is used as is, without reflection */
break;
default:
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
if ( method == ConvolveMorphology && kernel->width == 1 )
{ /* Special handling (for speed) of vertical (blur) kernels.
** This performs its handling in columns rather than in rows.
** This is only done for convolve as it is the only method that
** generates very large 1-D vertical kernels (such as a 'BlurKernel')
**
** Timing tests (on single CPU laptop)
** Using a vertical 1-d Blur with normal row-by-row (below)
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.807u
** Using this column method
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.620u
**
** Anthony Thyssen, 14 June 2010
*/
register ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
y;
ssize_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, x, -offy,1,
image->rows+kernel->height-1, exception);
q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p', while 'q' points to it directly */
r = offy;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
v;
register const MagickRealType
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Copy input image to the output image for unused channels
* This removes the need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,GetPixelIndex(
p_indexes+r));
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is, its values need to be reversed.
*/
k = &kernel->values[ kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is a simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNan(*k) ) continue;
result.red += (*k)*GetPixelRed(k_pixels);
result.green += (*k)*GetPixelGreen(k_pixels);
result.blue += (*k)*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNan(*k) ) continue;
alpha=(*k)*(QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels)));
gamma += alpha;
result.red += alpha*GetPixelRed(k_pixels);
result.green += alpha*GetPixelGreen(k_pixels);
result.blue += alpha*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += alpha*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
SetPixelRed(q,ClampToQuantum(gamma*result.red));
SetPixelGreen(q,ClampToQuantum(gamma*result.green));
SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*
result.index));
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q))
|| ( p[r].green != GetPixelGreen(q))
|| ( p[r].blue != GetPixelBlue(q))
|| ( p[r].opacity != GetPixelOpacity(q))
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+y) ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* y */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* x */
result_image->type=image->type;
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
return(status ? (ssize_t) changed : -1); /* -1 signals an error, as below */
}
/*
** Normal handling of horizontal or rectangular kernels (row by row)
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
size_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
kernel->height, exception);
q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to the origin in 'p', while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const MagickRealType
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result,
min,
max;
/* Copy the input pixel to the output image for unused channels.
* This removes the need to 'clone' a new image every iteration.
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+r));
/* Defaults */
min.red =
min.green =
min.blue =
min.opacity =
min.index = (MagickRealType) QuantumRange;
max.red =
max.green =
max.blue =
max.opacity =
max.index = (MagickRealType) 0;
/* default result is the original pixel value */
result.red = (MagickRealType) p[r].red;
result.green = (MagickRealType) p[r].green;
result.blue = (MagickRealType) p[r].blue;
result.opacity = QuantumRange - (MagickRealType) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (MagickRealType) GetPixelIndex(p_indexes+r);
switch (method) {
case ConvolveMorphology:
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
break;
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
/* use a boolean flag indicating when first match found */
result.red = 0.0; /* result is not used otherwise */
break;
default:
break;
}
switch ( method ) {
case ConvolveMorphology:
/* Weighted Average of pixels using reflected kernel
**
** NOTE: for this operation to work correctly with asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is, its values need to be reversed.
**
** Correlation is actually the same as this but without reflecting
** the kernel, and is thus 'lower-level' than Convolution. However
** Convolution is the more commonly used method, and it costs little
** extra processing to use a reflected kernel, so it is Convolution
** that is implemented. (A minimal 1-D sketch of this reflected-kernel
** sum follows this function.)
**
** Correlation will have its kernel reflected before calling
** this function to do a Convolve.
**
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is a simple greyscale channel operation.
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
result.red += (*k)*k_pixels[u].red;
result.green += (*k)*k_pixels[u].green;
result.blue += (*k)*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(
result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with the Alpha Channel so that
** transparent pixels do not contribute to the result: each channel
** accumulates kernel*alpha*color, and is then re-normalized by
** gamma = 1/sum(kernel*alpha) below.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
alpha=(*k)*(QuantumScale*(QuantumRange-
k_pixels[u].opacity));
gamma += alpha;
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index+=alpha*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
SetPixelRed(q,ClampToQuantum(gamma*result.red));
SetPixelGreen(q,ClampToQuantum(gamma*result.green));
SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(gamma*
result.index));
}
break;
case ErodeMorphology:
/* Minimum Value within kernel neighbourhood
**
** NOTE that the kernel is not reflected for this operation!
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value; this is currently not done, due to
** the nature of the boolean kernels being used.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
** NOTE: for this operation to work correctly with asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is, its values need to be reversed.
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value; this is currently not done, due to
** the nature of the boolean kernels being used.
**
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* Minimum of Foreground Pixels minus Maximum of Background Pixels
**
** NOTE that the kernel is not reflected for this operation,
** and consists of both foreground and background pixel
** neighbourhoods, 0.0 for background, and 1.0 for foreground
** with either Nan or 0.5 values for don't care.
**
** Note that the difference is clamped below, so this will never
** produce a meaningless negative result; such results would cause
** Thinning/Thicken to fail when used against a greyscale image.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) ) continue;
if ( (*k) > 0.7 )
{ /* minimum of foreground pixels */
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(
k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
min.index -= max.index; Maximize( min.index, 0.0 );
break;
case ErodeIntensityMorphology:
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) < PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateIntensityMorphology:
/* Select Pixel with Maximum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity (yet).
**
** NOTE: for this operation to work correctly with asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is, its values need to be reversed.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
#if 0
This code has been obsoleted by the MorphologyPrimitiveDirect() function.
However it is still (almost) correct coding for Greyscale Morphology.
That is...
GreyErode is equivalent, but with kernel values subtracted from pixels,
without the kernel rotation.
GreyDilate is equivalent, but using Maximum() instead of Minimum(),
with kernel rotation.
It has thus been preserved for a future implementation of those methods.
case DistanceMorphology:
/* Add the kernel value and select the minimum value found.
** The result is an iterative distance from the edge of the image shape.
**
** All Distance Kernels are symmetrical, but that may not always be
** the case; for example, how about a distance from the left edge only?
** To work correctly with asymmetrical kernels the reflected kernel
** needs to be applied.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
#endif
case UndefinedMorphology:
default:
break; /* Do nothing */
}
/* Final mathematics of results (combine with original image?)
**
** NOTE: the Difference Morphology operators Edge* and *Hat could also
** be done here, but work better with iteration as an image difference
** in the controlling function (below). Thicken and Thinning however
** should be done here so they can be iterated correctly.
*/
switch ( method ) {
case HitAndMissMorphology:
case ErodeMorphology:
result = min; /* minimum of neighbourhood */
break;
case DilateMorphology:
result = max; /* maximum of neighbourhood */
break;
case ThinningMorphology:
/* subtract pattern match from original */
result.red -= min.red;
result.green -= min.green;
result.blue -= min.blue;
result.opacity -= min.opacity;
result.index -= min.index;
break;
case ThickenMorphology:
/* Add the pattern matches to the original */
result.red += min.red;
result.green += min.green;
result.blue += min.blue;
result.opacity += min.opacity;
result.index += min.index;
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case UndefinedMorphology:
case ConvolveMorphology:
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
break; /* full pixel was directly assigned - not a channel method */
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( p[r].opacity != GetPixelOpacity(q) )
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* y */
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
return(status ? (ssize_t)changed : -1);
}
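#if 0
/* Illustrative sketch only (hypothetical helper, not MagickCore code):
** a minimal 1-D weighted sum showing how the convolve case above applies
** the kernel in reflected form by walking it from its last value to its
** first (the 'k--' loops), so Convolve(image,K) == Correlate(image,
** reflect(K)).  All names here are assumptions for illustration.
*/
static double ConvolveSketch1D(const double *pixels,
  const MagickRealType *kernel,const size_t width)
{
  register const MagickRealType
    *k;
  register size_t
    u;
  double
    sum;
  sum = 0.0;
  k = kernel+width-1;       /* start at the last kernel value */
  for (u=0; u < width; u++, k--)
    sum += (*k)*pixels[u];  /* reflected kernel => a Convolution */
  return(sum);
}
#endif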
/* This is almost identical to the MorphologyPrimitive() function above,
** but applies the primitive directly to the image in two passes.
**
** That is, after each row is 'Sync'ed into the image, the next row makes
** use of those values as part of its own calculation. It then repeats,
** but going in the opposite (bottom-up) direction.
**
** Because of this 'iterative' handling this function cannot make use of
** multi-threaded, parallel processing. (A minimal two-pass distance
** transform sketch follows this function.)
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
virt_width,
changed;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Some methods (including convolve) need to use a reflected kernel.
* Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
case VoronoiMorphology:
/* kernel needs to be used with reflection about the origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
break;
}
/* DO NOT THREAD THIS CODE! */
/* two views into same image (virtual, and actual) */
virt_view=AcquireCacheView(image);
auth_view=AcquireCacheView(image);
virt_width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only the top half of the kernel is processed, as we make a single
** downward pass through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* offset to the origin in 'p', while 'q' points to it directly */
r = (ssize_t) virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const MagickRealType
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Starting Defaults */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to the 'Matte' channel, copying the closest color.
**
** This is experimental; really the 'alpha' component should be a
** completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( p[r].opacity != GetPixelOpacity(q) )
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
/* Do the reversed pass through the image */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only the bottom half of the kernel is processed as we move up
** the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
/* offset to the origin in 'p', while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const MagickRealType
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Default - previously modified pixel */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to the 'Matte' channel, copying the closest color.
**
** This is experimental; really the 'alpha' component should be a
** completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( p[r].opacity != GetPixelOpacity(q) )
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
return(status ? (ssize_t) changed : -1);
}
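#if 0
/* Illustrative sketch only (hypothetical helper, not MagickCore code):
** the two-pass idea used by MorphologyPrimitiveDirect() above, reduced
** to a plain city-block distance transform over an int grid.  Feature
** pixels start at 0, all others at a large value; each pass only looks
** at neighbours already updated in that pass's direction.
*/
static void DistanceSketch(int *grid,const ssize_t width,
  const ssize_t height)
{
  ssize_t
    x, y;
  /* forward pass: use already-updated top and left neighbours */
  for (y=0; y < height; y++)
    for (x=0; x < width; x++) {
      if (y > 0 && grid[(y-1)*width+x]+1 < grid[y*width+x])
        grid[y*width+x] = grid[(y-1)*width+x]+1;
      if (x > 0 && grid[y*width+x-1]+1 < grid[y*width+x])
        grid[y*width+x] = grid[y*width+x-1]+1;
    }
  /* backward pass: use already-updated bottom and right neighbours */
  for (y=height-1; y >= 0; y--)
    for (x=width-1; x >= 0; x--) {
      if (y < height-1 && grid[(y+1)*width+x]+1 < grid[y*width+x])
        grid[y*width+x] = grid[(y+1)*width+x]+1;
      if (x < width-1 && grid[y*width+x+1]+1 < grid[y*width+x])
        grid[y*width+x] = grid[y*width+x+1]+1;
    }
}
#endif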
/* Apply a Morphology by calling the above low-level primitive application
** functions. This function handles any iteration loops, composition or
** re-iteration of results, and compound morphology methods that are based
** on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above). (An illustrative
** usage sketch follows this function.)
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[80];
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *)NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"verbose"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THRU */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue;
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special == MagickTrue )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loop around the more primitive morphology methods:
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel to start a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the dilated image for the later difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem strange to convert a Correlation into a
** Convolution as the Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose == MagickTrue ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose == MagickTrue ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose == MagickTrue && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose == MagickTrue && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** That is, turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
break;
case EdgeMorphology:
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose == MagickTrue ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes, goto's are bad, but they make cleanup a lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
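#if 0
/* Illustrative usage sketch only: how a caller might invoke the glue
** function above to apply a morphological 'Open'.  The kernel string
** follows the AcquireKernelInfo() syntax; error handling is minimal
** and 'OpenExample' is a hypothetical name, not MagickCore API.
*/
static Image *OpenExample(const Image *image,ExceptionInfo *exception)
{
  KernelInfo
    *kernel;
  Image
    *result;
  kernel = AcquireKernelInfo("Octagon:3");
  if (kernel == (KernelInfo *) NULL)
    return((Image *) NULL);
  /* one iteration of Open (erode, then dilate with reflected kernel) */
  result = MorphologyApply(image, DefaultChannels, OpenMorphology, 1,
    kernel, UndefinedCompositeOp, image->bias, exception);
  kernel = DestroyKernelInfo(kernel);
  return(result);
}
#endif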
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showkernel 1")
% (An illustrative usage sketch follows MorphologyImage() below.)
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImageChannel(const Image *image,
const ChannelType channel,const MorphologyMethod method,
const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
KernelInfo
*curr_kernel;
CompositeOperator
compose;
Image
*morphology_image;
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
curr_kernel = (KernelInfo *) kernel;
if ( method == ConvolveMorphology || method == CorrelateMorphology )
{
const char
*artifact;
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *)NULL ) {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL) {
curr_kernel=DestroyKernelInfo(curr_kernel);
return((Image *) NULL);
}
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
/* display the (normalized) kernel via stderr */
if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{ const char
*artifact;
artifact = GetImageArtifact(image,"morphology:compose");
compose = UndefinedCompositeOp; /* use default for method */
if ( artifact != (const char *) NULL)
compose = (CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,artifact);
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image, channel, method, iterations,
curr_kernel, compose, image->bias, exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod
method, const ssize_t iterations,const KernelInfo *kernel, ExceptionInfo
*exception)
{
Image
*morphology_image;
morphology_image=MorphologyImageChannel(image,DefaultChannels,method,
iterations,kernel,exception);
return(morphology_image);
}
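#if 0
/* Illustrative usage sketch only: the user settings honoured above are
** image 'artifacts'.  Here a Gaussian convolution is normalized via the
** "convolve:scale" artifact before MorphologyImage() is called.
** 'ConvolveExample' is a hypothetical name, not MagickCore API.
*/
static Image *ConvolveExample(Image *image,ExceptionInfo *exception)
{
  KernelInfo
    *kernel;
  Image
    *result;
  kernel = AcquireKernelInfo("Gaussian:0x2");
  if (kernel == (KernelInfo *) NULL)
    return((Image *) NULL);
  /* '!' requests kernel normalization, per ScaleKernelInfo() below */
  (void) SetImageArtifact(image, "convolve:scale", "!");
  result = MorphologyImage(image, ConvolveMorphology, 1, kernel, exception);
  kernel = DestroyKernelInfo(kernel);
  return(result);
}
#endif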
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree rotations, of either 1D or
% square kernels, and 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* rotate the lower kernels first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* Modulus the angle */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allow a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees -- 3x3 kernels only (worked example below) */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle */
MagickRealType t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
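/* Worked example (illustrative) of the 45 degree cyclic shift above:
** the eight outer values each move one step clockwise around the centre.
**     1 2 3        4 1 2
**     4 5 6   =>   7 5 3
**     7 8 9        8 9 6
*/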
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a 1 dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees */
{ register size_t
i,j,x,y;
register MagickRealType
*k,t;
k=kernel->values;
for( i=0, x=kernel->width-1; i<=x; i++, x--)
for( j=0, y=kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
/* For a 180 degree rotation - also known as a reflection.
* This is actually a very common operation!
* Basically all that is needed is a reversal of the kernel data
* and a reflection of the origin.
*/
MagickRealType
t;
register MagickRealType
*k;
size_t
i,
j;
k=kernel->values;
for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
/* At this point the angle should be between -45 (315) and +45 degrees.
* In the future some form of non-orthogonal angled rotation could be
* performed here, possibly with a linear kernel restriction.
*/
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel. (Illustrative examples follow the
% function below.)
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
GeometryFlags
flags;
GeometryInfo
args;
SetGeometryInfo(&args);
flags = (GeometryFlags) ParseGeometry(geometry, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/
args.rho *= 0.01, args.sigma *= 0.01;
if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */
args.rho = 1.0;
if ( (flags & SigmaValue) == 0 )
args.sigma = 0.0;
/* Scale/Normalize the input kernel */
ScaleKernelInfo(kernel, args.rho, flags);
/* Add Unity Kernel, for blending with original */
if ( (flags & SigmaValue) != 0 )
UnityAddKernelInfo(kernel, args.sigma);
return;
}
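#if 0
/* Illustrative sketch only (hypothetical 'kernel' variable): what some
** geometry strings do when passed to ScaleGeometryKernelInfo() above,
** per the flag handling in the function and the ScaleKernelInfo() notes
** below ('!' means NormalizeValue).
**   "2"      scale all kernel values by 2.0
**   "50%"    scale all kernel values by 0.5
**   "2!"     normalize the kernel, then scale it by 2.0
**   "1,25%"  leave values scaled by 1.0, then add a 0.25 unity kernel
*/
ScaleGeometryKernelInfo(kernel, "2!");  /* normalize, then double */
#endif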
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel are scaled
% directly by the given scaling factor, without other change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically, kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For kernels that contain some negative values (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute value of the sum of the kernel
% values, so that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero (such as 'Laplacian' kernels) the
% kernel will be scaled by just the sum of the positive values, so that its
% output range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately from the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernel's creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const GeometryFlags normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after any normalization) by this factor if it is
% not zero; normalization itself is controlled by the flags below.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
const double scaling_factor,const GeometryFlags normalize_flags)
{
register ssize_t
i;
register double
pos_scale,
neg_scale;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);
/* Normalization of Kernel */
pos_scale = 1.0;
if ( (normalize_flags&NormalizeValue) != 0 ) {
if ( fabs(kernel->positive_range + kernel->negative_range) > MagickEpsilon )
/* non-zero-summing kernel (generally positive) */
pos_scale = fabs(kernel->positive_range + kernel->negative_range);
else
/* zero-summing kernel */
pos_scale = kernel->positive_range;
}
/* Force kernel into a normalized zero-summing kernel */
if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
pos_scale = ( fabs(kernel->positive_range) > MagickEpsilon )
? kernel->positive_range : 1.0;
neg_scale = ( fabs(kernel->negative_range) > MagickEpsilon )
? -kernel->negative_range : 1.0;
}
else
neg_scale = pos_scale;
/* finalize scaling_factor for positive and negative components */
pos_scale = scaling_factor/pos_scale;
neg_scale = scaling_factor/neg_scale;
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
if ( ! IsNan(kernel->values[i]) )
kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;
/* convolution output range */
kernel->positive_range *= pos_scale;
kernel->negative_range *= neg_scale;
/* maximum and minimum values in kernel */
kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;
/* swap kernel settings if user's scaling factor is negative */
if ( scaling_factor < MagickEpsilon ) {
double t;
t = kernel->positive_range;
kernel->positive_range = kernel->negative_range;
kernel->negative_range = t;
t = kernel->maximum;
kernel->maximum = kernel->minimum;
kernel->minimum = t;
}
return;
}
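/* Editor's note: the normalization rules in the header comment reduce to:
 * divide by |sum| when the kernel does not sum to zero, otherwise divide
 * by the sum of the positive values alone. A standalone sketch of that
 * rule on a plain n-element array (not the KernelInfo API):
 */
#include <math.h>
#include <stddef.h>
static void normalize_values_sketch(double *k, size_t n)
{
  double sum = 0.0, pos = 0.0, scale;
  size_t i;
  for (i=0; i < n; i++) {
    sum += k[i];
    if (k[i] > 0.0) pos += k[i];
  }
  /* 1.0e-10 stands in for MagickEpsilon in this sketch */
  scale = (fabs(sum) > 1.0e-10) ? fabs(sum) : pos;
  if (scale > 0.0)
    for (i=0; i < n; i++)
      k[i] /= scale;
}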
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'showkernel' option request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
const KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) > MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if ( IsNan(k->values[i]) )
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
UnityAddKernelInfo(kernel->next, scale);
/* Add the scaled unity kernel to the existing kernel */
kernel->values[kernel->x+kernel->y*kernel->width] += scale;
CalcKernelMetaData(kernel); /* recalculate the meta-data */
return;
}
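/* Editor's note: a hedged sketch of the blending arithmetic described
 * above, on a plain 3x3 array rather than a KernelInfo. A normalized
 * blur kernel becomes a 50/50 soft blur by pre-scaling it and then
 * adding half a unity kernel at the origin (centre cell, index 4):
 */
static void soft_blur_blend_sketch(double k[9])
{
  int i;
  for (i=0; i < 9; i++)
    k[i] *= 0.5;  /* pre-scale the kernel, as the docs above require */
  k[4] += 0.5;    /* values[x + y*width] with x=y=1, width=3 */
}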
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
register size_t
i;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ZeroKernelNans(kernel->next);
for (i=0; i < (kernel->width*kernel->height); i++)
if ( IsNan(kernel->values[i]) )
kernel->values[i] = 0.0;
return;
}
|
nmt_master_flat.c | #include "utils.h"
static void purify_generic_flat(nmt_field_flat *fl,flouble *mask,fcomplex **walm0,flouble **maps_in,fcomplex **alms_out)
{
if(fl->pure_b || fl->pure_e) {
nmt_purify_flat(fl,mask,walm0,maps_in,maps_in,alms_out);
}
else {
int im1;
for(im1=0;im1<fl->nmaps;im1++)
fs_map_product(fl->fs,maps_in[im1],mask,maps_in[im1]);
fs_map2alm(fl->fs,1,2*fl->pol,maps_in,alms_out);
}
}
void nmt_workspace_flat_free(nmt_workspace_flat *w)
{
int ii;
gsl_permutation_free(w->coupling_matrix_perm);
gsl_matrix_free(w->coupling_matrix_binned_gsl);
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
free(w->coupling_matrix_unbinned[ii]);
free(w->coupling_matrix_unbinned);
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
free(w->coupling_matrix_binned[ii]);
free(w->coupling_matrix_binned);
free(w->n_cells);
nmt_bins_flat_free(w->bin);
nmt_flatsky_info_free(w->fs);
free(w->mask1);
free(w->mask2);
#ifdef _ENABLE_FLAT_THEORY_ACCURATE
free(w->maskprod);
#endif //_ENABLE_FLAT_THEORY_ACCURATE
free(w);
}
static nmt_workspace_flat *nmt_workspace_flat_new(int ncls,nmt_flatsky_info *fs,
nmt_binning_scheme_flat *bin,
flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y)
{
int ii,ib=0;
nmt_workspace_flat *w=my_malloc(sizeof(nmt_workspace_flat));
w->ncls=ncls;
w->ellcut_x[0]=lmn_x;
w->ellcut_x[1]=lmx_x;
w->ellcut_y[0]=lmn_y;
w->ellcut_y[1]=lmx_y;
w->bin=nmt_bins_flat_create(bin->n_bands,bin->ell_0_list,bin->ell_f_list);
w->lmax=w->bin->ell_f_list[w->bin->n_bands-1];
w->fs=nmt_flatsky_info_alloc(fs->nx,fs->ny,fs->lx,fs->ly);
w->mask1=my_malloc(fs->npix*sizeof(flouble));
w->mask2=my_malloc(fs->npix*sizeof(flouble));
#ifdef _ENABLE_FLAT_THEORY_ACCURATE
w->maskprod=my_malloc(w->fs->npix*sizeof(flouble));
#endif //_ENABLE_FLAT_THEORY_ACCURATE
w->n_cells=my_calloc(w->bin->n_bands,sizeof(int));
w->coupling_matrix_unbinned=my_malloc(w->ncls*w->bin->n_bands*sizeof(flouble *));
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
w->coupling_matrix_unbinned[ii]=my_calloc(w->ncls*w->fs->n_ell,sizeof(flouble));
w->coupling_matrix_binned=my_malloc(w->ncls*w->bin->n_bands*sizeof(flouble *));
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
w->coupling_matrix_binned[ii]=my_calloc(w->ncls*w->bin->n_bands,sizeof(flouble));
w->coupling_matrix_binned_gsl=gsl_matrix_alloc(w->ncls*w->bin->n_bands,w->ncls*w->bin->n_bands);
w->coupling_matrix_perm=gsl_permutation_alloc(w->ncls*w->bin->n_bands);
return w;
}
nmt_workspace_flat *nmt_workspace_flat_read(char *fname)
{
int ii,nx,ny;
flouble lx,ly;
nmt_workspace_flat *w=my_malloc(sizeof(nmt_workspace_flat));
FILE *fi=my_fopen(fname,"rb");
my_fread(&(w->ncls),sizeof(int),1,fi);
my_fread(w->ellcut_x,sizeof(flouble),2,fi);
my_fread(w->ellcut_y,sizeof(flouble),2,fi);
my_fread(&(w->pe1),sizeof(int),1,fi);
my_fread(&(w->pe2),sizeof(int),1,fi);
my_fread(&(w->pb1),sizeof(int),1,fi);
my_fread(&(w->pb2),sizeof(int),1,fi);
my_fread(&nx,sizeof(int),1,fi);
my_fread(&ny,sizeof(int),1,fi);
my_fread(&lx,sizeof(flouble),1,fi);
my_fread(&ly,sizeof(flouble),1,fi);
w->fs=nmt_flatsky_info_alloc(nx,ny,lx,ly);
w->mask1=my_malloc(w->fs->npix*sizeof(flouble));
my_fread(w->mask1,sizeof(flouble),w->fs->npix,fi);
w->mask2=my_malloc(w->fs->npix*sizeof(flouble));
my_fread(w->mask2,sizeof(flouble),w->fs->npix,fi);
#ifdef _ENABLE_FLAT_THEORY_ACCURATE
w->maskprod=my_malloc(w->fs->npix*sizeof(flouble));
my_fread(w->maskprod,sizeof(flouble),w->fs->npix,fi);
#endif //_ENABLE_FLAT_THEORY_ACCURATE
w->bin=my_malloc(sizeof(nmt_binning_scheme_flat));
my_fread(&(w->bin->n_bands),sizeof(int),1,fi);
w->bin->ell_0_list=my_malloc(w->bin->n_bands*sizeof(flouble));
w->bin->ell_f_list=my_malloc(w->bin->n_bands*sizeof(flouble));
my_fread(w->bin->ell_0_list,sizeof(flouble),w->bin->n_bands,fi);
my_fread(w->bin->ell_f_list,sizeof(flouble),w->bin->n_bands,fi);
w->lmax=w->bin->ell_f_list[w->bin->n_bands-1];
w->n_cells=my_malloc(w->bin->n_bands*sizeof(int));
my_fread(w->n_cells,sizeof(int),w->bin->n_bands,fi);
w->coupling_matrix_unbinned=my_malloc(w->ncls*w->bin->n_bands*sizeof(flouble *));
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++) {
w->coupling_matrix_unbinned[ii]=my_malloc(w->ncls*w->fs->n_ell*sizeof(flouble));
my_fread(w->coupling_matrix_unbinned[ii],sizeof(flouble),w->ncls*w->fs->n_ell,fi);
}
w->coupling_matrix_binned=my_malloc(w->ncls*w->bin->n_bands*sizeof(flouble *));
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++) {
w->coupling_matrix_binned[ii]=my_malloc(w->ncls*w->bin->n_bands*sizeof(flouble));
my_fread(w->coupling_matrix_binned[ii],sizeof(flouble),w->ncls*w->bin->n_bands,fi);
}
w->coupling_matrix_binned_gsl=gsl_matrix_alloc(w->ncls*w->bin->n_bands,w->ncls*w->bin->n_bands);
w->coupling_matrix_perm=gsl_permutation_alloc(w->ncls*w->bin->n_bands);
gsl_matrix_fread(fi,w->coupling_matrix_binned_gsl);
gsl_permutation_fread(fi,w->coupling_matrix_perm);
fclose(fi);
return w;
}
void nmt_workspace_flat_write(nmt_workspace_flat *w,char *fname)
{
int ii;
FILE *fo=my_fopen(fname,"wb");
my_fwrite(&(w->ncls),sizeof(int),1,fo);
my_fwrite(w->ellcut_x,sizeof(flouble),2,fo);
my_fwrite(w->ellcut_y,sizeof(flouble),2,fo);
my_fwrite(&(w->pe1),sizeof(int),1,fo);
my_fwrite(&(w->pe2),sizeof(int),1,fo);
my_fwrite(&(w->pb1),sizeof(int),1,fo);
my_fwrite(&(w->pb2),sizeof(int),1,fo);
my_fwrite(&(w->fs->nx),sizeof(int),1,fo);
my_fwrite(&(w->fs->ny),sizeof(int),1,fo);
my_fwrite(&(w->fs->lx),sizeof(flouble),1,fo);
my_fwrite(&(w->fs->ly),sizeof(flouble),1,fo);
my_fwrite(w->mask1,sizeof(flouble),w->fs->npix,fo);
my_fwrite(w->mask2,sizeof(flouble),w->fs->npix,fo);
#ifdef _ENABLE_FLAT_THEORY_ACCURATE
my_fwrite(w->maskprod,sizeof(flouble),w->fs->npix,fo);
#endif //_ENABLE_FLAT_THEORY_ACCURATE
my_fwrite(&(w->bin->n_bands),sizeof(int),1,fo);
my_fwrite(w->bin->ell_0_list,sizeof(flouble),w->bin->n_bands,fo);
my_fwrite(w->bin->ell_f_list,sizeof(flouble),w->bin->n_bands,fo);
my_fwrite(w->n_cells,sizeof(int),w->bin->n_bands,fo);
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
my_fwrite(w->coupling_matrix_unbinned[ii],sizeof(flouble),w->ncls*w->fs->n_ell,fo);
for(ii=0;ii<w->ncls*w->bin->n_bands;ii++)
my_fwrite(w->coupling_matrix_binned[ii],sizeof(flouble),w->ncls*w->bin->n_bands,fo);
gsl_matrix_fwrite(fo,w->coupling_matrix_binned_gsl);
gsl_permutation_fwrite(fo,w->coupling_matrix_perm);
fclose(fo);
}
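/* Editor's note: a hedged usage sketch of the write/read pair above,
 * round-tripping a workspace through disk so the expensive coupling
 * matrix is computed only once. The workspace is assumed to come from
 * nmt_compute_coupling_matrix_flat below. */
static nmt_workspace_flat *cache_workspace_sketch(nmt_workspace_flat *w,char *fname)
{
  nmt_workspace_flat_write(w,fname);
  nmt_workspace_flat_free(w);
  return nmt_workspace_flat_read(fname);
}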
static int check_flatsky_infos(nmt_flatsky_info *fs1,nmt_flatsky_info *fs2)
{
if(fs1->nx!=fs2->nx) return 1;
if(fs1->ny!=fs2->ny) return 1;
if(fs1->lx!=fs2->lx) return 1;
if(fs1->ly!=fs2->ly) return 1;
return 0;
}
nmt_workspace_flat *nmt_compute_coupling_matrix_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,
flouble lmn_x,flouble lmx_x,
flouble lmn_y,flouble lmx_y)
{
if(check_flatsky_infos(fl1->fs,fl2->fs))
report_error(NMT_ERROR_CONSISTENT_RESO,"Can only correlate fields defined on the same pixels!\n");
int ii;
nmt_workspace_flat *w=nmt_workspace_flat_new(fl1->nmaps*fl2->nmaps,fl1->fs,bin,
lmn_x,lmx_x,lmn_y,lmx_y);
nmt_flatsky_info *fs=fl1->fs;
w->pe1=fl1->pure_e;
w->pe2=fl2->pure_e;
w->pb1=fl1->pure_b;
w->pb2=fl2->pure_b;
for(ii=0;ii<fl1->fs->npix;ii++)
w->mask1[ii]=fl1->mask[ii];
for(ii=0;ii<fl2->fs->npix;ii++)
w->mask2[ii]=fl2->mask[ii];
fcomplex *cmask1,*cmask2;
flouble *maskprod,*cosarr,*sinarr,*kmodarr,*beamprod;
int *i_band,*i_band_nocut,*i_ring;
cmask1=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
fs_map2alm(fl1->fs,1,0,&(fl1->mask),&cmask1);
if(fl1==fl2)
cmask2=cmask1;
else {
cmask2=dftw_malloc(fs->ny*(fs->nx/2+1)*sizeof(fcomplex));
fs_map2alm(fl2->fs,1,0,&(fl2->mask),&cmask2);
}
i_ring=my_malloc(w->fs->npix*sizeof(int));
i_band=my_malloc(w->fs->npix*sizeof(int));
#ifdef _ENABLE_FLAT_THEORY_ACCURATE
maskprod=w->maskprod;
#else //_ENABLE_FLAT_THEORY_ACCURATE
maskprod=my_malloc(w->fs->npix*sizeof(flouble));
#endif //_ENABLE_FLAT_THEORY_ACCURATE
i_band_nocut=my_malloc(w->fs->npix*sizeof(int));
kmodarr=dftw_malloc(w->fs->npix*sizeof(flouble));
beamprod=dftw_malloc(w->fs->npix*sizeof(flouble));
if(w->ncls>1) {
cosarr=dftw_malloc(w->fs->npix*sizeof(flouble));
sinarr=dftw_malloc(w->fs->npix*sizeof(flouble));
}
int *x_out_range,*y_out_range;
x_out_range=my_calloc(fs->nx,sizeof(int));
y_out_range=my_calloc(fs->ny,sizeof(int));
for(ii=0;ii<fs->nx;ii++) {
flouble k;
if(2*ii<=fs->nx) k=ii*2*M_PI/fs->lx;
else k=-(fs->nx-ii)*2*M_PI/fs->lx;
if((k<=w->ellcut_x[1]) && (k>=w->ellcut_x[0]))
x_out_range[ii]=1;
}
for(ii=0;ii<fs->ny;ii++) {
flouble k;
if(2*ii<=fs->ny) k=ii*2*M_PI/fs->ly;
else k=-(fs->ny-ii)*2*M_PI/fs->ly;
if((k<=w->ellcut_y[1]) && (k>=w->ellcut_y[0]))
y_out_range[ii]=1;
}
#pragma omp parallel default(none) \
shared(fl1,fl2,fs,cmask1,cmask2,w,i_ring,i_band,i_band_nocut) \
shared(cosarr,sinarr,kmodarr,beamprod,maskprod) \
shared(x_out_range,y_out_range)
{
flouble dkx=2*M_PI/fs->lx;
flouble dky=2*M_PI/fs->ly;
int iy1,ix1;
int *n_cells_thr=my_calloc(w->bin->n_bands,sizeof(int));
gsl_interp_accel *intacc_beam=gsl_interp_accel_alloc();
#pragma omp for
for(iy1=0;iy1<fs->ny;iy1++) {
flouble ky;
int ik=0;
if(2*iy1<=fs->ny)
ky=iy1*dky;
else
ky=-(fs->ny-iy1)*dky;
for(ix1=0;ix1<fs->nx;ix1++) {
flouble kx,kmod,beam1,beam2;
int ix_here,index_here,index;
index=ix1+fs->nx*iy1;
if(2*ix1<=fs->nx) {
kx=ix1*dkx;
ix_here=ix1;
}
else {
kx=-(fs->nx-ix1)*dkx;
ix_here=fs->nx-ix1;
}
index_here=ix_here+(fs->nx/2+1)*iy1;
maskprod[index]=(creal(cmask1[index_here])*creal(cmask2[index_here])+
cimag(cmask1[index_here])*cimag(cmask2[index_here]));
kmod=sqrt(kx*kx+ky*ky);
beam1=nmt_k_function_eval(fl1->beam,kmod,intacc_beam);
beam2=nmt_k_function_eval(fl2->beam,kmod,intacc_beam);
kmodarr[index]=kmod;
beamprod[index]=beam1*beam2;
ik=nmt_bins_flat_search_fast(w->bin,kmod,ik);
if(y_out_range[iy1] || x_out_range[ix1])
i_band[index]=-1;
else {
if(ik>=0) {
i_band[index]=ik;
n_cells_thr[ik]++;
}
else
i_band[index]=-1;
}
i_band_nocut[index]=ik;
i_ring[index]=(int)(kmod*w->fs->i_dell);
if((i_ring[index]<0) || (i_ring[index]>=w->fs->n_ell))
i_ring[index]=-1;
if(w->ncls>1) {
flouble c,s;
if(kmod>0) {
c=kx/kmod;
s=ky/kmod;
}
else {
c=1.;
s=0.;
}
cosarr[index]=c*c-s*s;
sinarr[index]=2*s*c;
}
}
} //end omp for
#pragma omp critical
{
for(iy1=0;iy1<w->bin->n_bands;iy1++)
w->n_cells[iy1]+=n_cells_thr[iy1];
} //end omp critical
free(n_cells_thr);
gsl_interp_accel_free(intacc_beam);
} //end omp parallel
free(x_out_range);
free(y_out_range);
#pragma omp parallel default(none) \
shared(fs,i_ring,i_band,i_band_nocut,w) \
shared(cosarr,sinarr,kmodarr,maskprod,beamprod)
{
int iy1,ix1,ix2,iy2;
int pe1=w->pe1,pe2=w->pe2,pb1=w->pb1,pb2=w->pb2;
int pure_any=pe1 || pb1 || pe2 || pb2;
flouble **coup_unbinned_thr,**coup_binned_thr;
coup_unbinned_thr=my_malloc(w->bin->n_bands*w->ncls*sizeof(flouble *));
for(iy1=0;iy1<w->bin->n_bands*w->ncls;iy1++)
coup_unbinned_thr[iy1]=my_calloc(w->fs->n_ell*w->ncls,sizeof(flouble));
coup_binned_thr=my_malloc(w->bin->n_bands*w->ncls*sizeof(flouble *));
for(iy1=0;iy1<w->bin->n_bands*w->ncls;iy1++)
coup_binned_thr[iy1]=my_calloc(w->bin->n_bands*w->ncls,sizeof(flouble));
#pragma omp for
for(iy1=0;iy1<fs->ny;iy1++) {
for(ix1=0;ix1<fs->nx;ix1++) {
int index1=ix1+fs->nx*iy1;
int ik1=i_band[index1];
if(ik1>=0) {
flouble inv_k1=0;
ik1*=w->ncls;
if((index1>0) && (w->ncls>1))
inv_k1=1./kmodarr[index1];
for(iy2=0;iy2<fs->ny;iy2++) {
for(ix2=0;ix2<fs->nx;ix2++) {
int index2=ix2+fs->nx*iy2;
int ir2=i_ring[index2];
int ik2=i_band_nocut[index2];
flouble cdiff=1,sdiff=0,kr=1,mp;
int index;
int iy=iy1-iy2;
int ix=ix1-ix2;
if(iy<0) iy+=fs->ny;
if(ix<0) ix+=fs->nx;
ik2*=w->ncls;
ir2*=w->ncls;
index=ix+fs->nx*iy;
if(w->ncls>1) {
cdiff=cosarr[index1]*cosarr[index2]+sinarr[index1]*sinarr[index2];
sdiff=sinarr[index1]*cosarr[index2]-cosarr[index1]*sinarr[index2];
if((index1==0) && (index2==0))
kr=1;
else
kr=kmodarr[index2]*inv_k1;
kr*=kr;
}
mp=maskprod[index]*beamprod[index2];
if(w->ncls==1) {
if(ir2>=0)
coup_unbinned_thr[ik1+0][ir2+0]+=mp;
if(ik2>=0)
coup_binned_thr[ik1+0][ik2+0]+=mp;
}
if(w->ncls==2) {
flouble fc[2],fs[2];
fc[0]=cdiff*mp;
fs[0]=sdiff*mp;
if(pure_any) {
fc[1]=kr*mp; fs[1]=0;
}
if(ir2>=0) {
coup_unbinned_thr[ik1+0][ir2+0]+=fc[pe1+pe2]; //TE,TE
coup_unbinned_thr[ik1+0][ir2+1]-=fs[pe1+pe2]; //TE,TB
coup_unbinned_thr[ik1+1][ir2+0]+=fs[pb1+pb2]; //TB,TE
coup_unbinned_thr[ik1+1][ir2+1]+=fc[pb1+pb2]; //TB,TB
}
if(ik2>=0) {
coup_binned_thr[ik1+0][ik2+0]+=fc[pe1+pe2]; //TE,TE
coup_binned_thr[ik1+0][ik2+1]-=fs[pe1+pe2]; //TE,TB
coup_binned_thr[ik1+1][ik2+0]+=fs[pb1+pb2]; //TB,TE
coup_binned_thr[ik1+1][ik2+1]+=fc[pb1+pb2]; //TB,TB
}
}
if(w->ncls==4) {
flouble fc[2],fs[2];
fc[0]=cdiff; fs[0]=sdiff;
if(pure_any) {
fc[1]=kr; fs[1]=0;
}
if(ir2>=0) {
coup_unbinned_thr[ik1+0][ir2+0]+=fc[pe1]*fc[pe2]*mp; //EE,EE
coup_unbinned_thr[ik1+0][ir2+1]-=fc[pe1]*fs[pe2]*mp; //EE,EB
coup_unbinned_thr[ik1+0][ir2+2]-=fs[pe1]*fc[pe2]*mp; //EE,BE
coup_unbinned_thr[ik1+0][ir2+3]+=fs[pe1]*fs[pe2]*mp; //EE,BB
coup_unbinned_thr[ik1+1][ir2+0]+=fc[pe1]*fs[pb2]*mp; //EB,EE
coup_unbinned_thr[ik1+1][ir2+1]+=fc[pe1]*fc[pb2]*mp; //EB,EB
coup_unbinned_thr[ik1+1][ir2+2]-=fs[pe1]*fs[pb2]*mp; //EB,BE
coup_unbinned_thr[ik1+1][ir2+3]-=fs[pe1]*fc[pb2]*mp; //EB,BB
coup_unbinned_thr[ik1+2][ir2+0]+=fs[pb1]*fc[pe2]*mp; //BE,EE
coup_unbinned_thr[ik1+2][ir2+1]-=fs[pb1]*fs[pe2]*mp; //BE,EB
coup_unbinned_thr[ik1+2][ir2+2]+=fc[pb1]*fc[pe2]*mp; //BE,BE
coup_unbinned_thr[ik1+2][ir2+3]-=fc[pb1]*fs[pe2]*mp; //BE,BB
coup_unbinned_thr[ik1+3][ir2+0]+=fs[pb1]*fs[pb2]*mp; //BB,EE
coup_unbinned_thr[ik1+3][ir2+1]+=fs[pb1]*fc[pb2]*mp; //BB,EB
coup_unbinned_thr[ik1+3][ir2+2]+=fc[pb1]*fs[pb2]*mp; //BB,BE
coup_unbinned_thr[ik1+3][ir2+3]+=fc[pb1]*fc[pb2]*mp; //BB,BB
}
if(ik2>=0) {
coup_binned_thr[ik1+0][ik2+0]+=fc[pe1]*fc[pe2]*mp; //EE,EE
coup_binned_thr[ik1+0][ik2+1]-=fc[pe1]*fs[pe2]*mp; //EE,EB
coup_binned_thr[ik1+0][ik2+2]-=fs[pe1]*fc[pe2]*mp; //EE,BE
coup_binned_thr[ik1+0][ik2+3]+=fs[pe1]*fs[pe2]*mp; //EE,BB
coup_binned_thr[ik1+1][ik2+0]+=fc[pe1]*fs[pb2]*mp; //EB,EE
coup_binned_thr[ik1+1][ik2+1]+=fc[pe1]*fc[pb2]*mp; //EB,EB
coup_binned_thr[ik1+1][ik2+2]-=fs[pe1]*fs[pb2]*mp; //EB,BE
coup_binned_thr[ik1+1][ik2+3]-=fs[pe1]*fc[pb2]*mp; //EB,BB
coup_binned_thr[ik1+2][ik2+0]+=fs[pb1]*fc[pe2]*mp; //BE,EE
coup_binned_thr[ik1+2][ik2+1]-=fs[pb1]*fs[pe2]*mp; //BE,EB
coup_binned_thr[ik1+2][ik2+2]+=fc[pb1]*fc[pe2]*mp; //BE,BE
coup_binned_thr[ik1+2][ik2+3]-=fc[pb1]*fs[pe2]*mp; //BE,BB
coup_binned_thr[ik1+3][ik2+0]+=fs[pb1]*fs[pb2]*mp; //BB,EE
coup_binned_thr[ik1+3][ik2+1]+=fs[pb1]*fc[pb2]*mp; //BB,EB
coup_binned_thr[ik1+3][ik2+2]+=fc[pb1]*fs[pb2]*mp; //BB,BE
coup_binned_thr[ik1+3][ik2+3]+=fc[pb1]*fc[pb2]*mp; //BB,BB
}
}
}
}
}
}
} //end omp for
#pragma omp critical
{
for(iy1=0;iy1<w->ncls*w->bin->n_bands;iy1++) {
for(iy2=0;iy2<w->ncls*w->bin->n_bands;iy2++)
w->coupling_matrix_binned[iy1][iy2]+=coup_binned_thr[iy1][iy2];
for(iy2=0;iy2<w->ncls*w->fs->n_ell;iy2++)
w->coupling_matrix_unbinned[iy1][iy2]+=coup_unbinned_thr[iy1][iy2];
}
} //end omp critical
for(iy1=0;iy1<w->bin->n_bands*w->ncls;iy1++) {
free(coup_unbinned_thr[iy1]);
free(coup_binned_thr[iy1]);
}
free(coup_unbinned_thr);
free(coup_binned_thr);
} //end omp parallel
#pragma omp parallel default(none) \
shared(w,fs)
{
int il1;
flouble fac_norm=4*M_PI*M_PI/(fs->lx*fs->lx*fs->ly*fs->ly);
#pragma omp for
for(il1=0;il1<w->bin->n_bands;il1++) {
int icl1;
flouble norm;
if(w->n_cells[il1]>0)
norm=fac_norm/w->n_cells[il1];
else
norm=0;
for(icl1=0;icl1<w->ncls;icl1++) {
int il2;
for(il2=0;il2<w->fs->n_ell;il2++) {
int icl2;
for(icl2=0;icl2<w->ncls;icl2++)
w->coupling_matrix_unbinned[w->ncls*il1+icl1][w->ncls*il2+icl2]*=norm;
}
for(il2=0;il2<w->bin->n_bands;il2++) {
int icl2;
for(icl2=0;icl2<w->ncls;icl2++)
w->coupling_matrix_binned[w->ncls*il1+icl1][w->ncls*il2+icl2]*=norm;
}
}
} //end omp for
} //end omp parallel
int icl_a,icl_b,ib2,ib3,sig;
for(icl_a=0;icl_a<w->ncls;icl_a++) {
for(icl_b=0;icl_b<w->ncls;icl_b++) {
for(ib2=0;ib2<w->bin->n_bands;ib2++) {
for(ib3=0;ib3<w->bin->n_bands;ib3++) {
gsl_matrix_set(w->coupling_matrix_binned_gsl,w->ncls*ib2+icl_a,w->ncls*ib3+icl_b,
w->coupling_matrix_binned[w->ncls*ib2+icl_a][w->ncls*ib3+icl_b]);
}
}
}
}
gsl_linalg_LU_decomp(w->coupling_matrix_binned_gsl,w->coupling_matrix_perm,&sig);
dftw_free(cmask1);
if(fl1!=fl2)
dftw_free(cmask2);
free(i_ring);
free(i_band);
free(i_band_nocut);
dftw_free(kmodarr);
dftw_free(beamprod);
#ifndef _ENABLE_FLAT_THEORY_ACCURATE
free(maskprod);
#endif //_ENABLE_FLAT_THEORY_ACCURATE
if(w->ncls>1) {
dftw_free(cosarr);
dftw_free(sinarr);
}
return w;
}
void nmt_compute_deprojection_bias_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,
flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y,
int nl_prop,flouble *l_prop,flouble **cl_proposal,
flouble **cl_bias)
{
//Placeholder
int ii;
long ip;
int nspec=fl1->nmaps*fl2->nmaps;
flouble **cl_dum=my_malloc(nspec*sizeof(flouble *));
nmt_k_function **cl_proposal_f=my_malloc(nspec*sizeof(nmt_k_function *));
for(ii=0;ii<nspec;ii++) {
cl_dum[ii]=my_calloc(bin->n_bands,sizeof(flouble));
cl_proposal_f[ii]=nmt_k_function_alloc(nl_prop,l_prop,cl_proposal[ii],cl_proposal[ii][0],0,0);
for(ip=0;ip<bin->n_bands;ip++)
cl_bias[ii][ip]=0;
}
if(check_flatsky_infos(fl1->fs,fl2->fs))
report_error(NMT_ERROR_CONSISTENT_RESO,"Can only correlate fields defined on the same pixels!\n");
//TODO: some terms (e.g. C^ab*SHT[w*g^j]) could be precomputed
//TODO: if fl1=fl2 F2=F3
//Allocate dummy maps and alms
flouble **map_1_dum=my_malloc(fl1->nmaps*sizeof(flouble *));
fcomplex **alm_1_dum=my_malloc(fl1->nmaps*sizeof(fcomplex *));
for(ii=0;ii<fl1->nmaps;ii++) {
map_1_dum[ii]=dftw_malloc(fl1->npix*sizeof(flouble));
alm_1_dum[ii]=dftw_malloc(fl1->fs->ny*(fl1->fs->nx/2+1)*sizeof(fcomplex));
}
flouble **map_2_dum=my_malloc(fl2->nmaps*sizeof(flouble *));
fcomplex **alm_2_dum=my_malloc(fl2->nmaps*sizeof(fcomplex *));
for(ii=0;ii<fl2->nmaps;ii++) {
map_2_dum[ii]=dftw_malloc(fl2->npix*sizeof(flouble));
alm_2_dum[ii]=dftw_malloc(fl2->fs->ny*(fl2->fs->nx/2+1)*sizeof(fcomplex));
}
if(fl2->ntemp>0) {
int iti;
for(iti=0;iti<fl2->ntemp;iti++) {
int itj;
for(itj=0;itj<fl2->ntemp;itj++) {
int im1,im2;
double nij=gsl_matrix_get(fl2->matrix_M,iti,itj);
//w*g^j
for(im2=0;im2<fl2->nmaps;im2++)
fs_map_product(fl2->fs,fl2->temp[itj][im2],fl2->mask,map_2_dum[im2]);
//DFT[w*g^j]
fs_map2alm(fl2->fs,1,2*fl2->pol,map_2_dum,alm_2_dum);
//C^ab*DFT[w*g^j]
for(im1=0;im1<fl1->nmaps;im1++) {
fs_zero_alm(fl1->fs,alm_1_dum[im1]);
for(im2=0;im2<fl2->nmaps;im2++)
fs_alter_alm(fl2->fs,-1.,alm_2_dum[im2],alm_1_dum[im1],cl_proposal_f[im1*fl2->nmaps+im2],1);
}
//DFT^-1[C^ab*DFT[w*g^j]]
fs_alm2map(fl1->fs,1,2*fl1->pol,map_1_dum,alm_1_dum);
//DFT[v*DFT^-1[C^ab*DFT[w*g^j]]]
purify_generic_flat(fl1,fl1->mask,fl1->a_mask,map_1_dum,alm_1_dum);
//Sum_m(DFT[v*DFT^-1[C^ab*DFT[w*g^j]]]*g^i*)/(2l+1)
fs_alm2cl(fl1->fs,bin,alm_1_dum,fl2->a_temp[iti],fl1->pol,fl2->pol,cl_dum,
lmn_x,lmx_x,lmn_y,lmx_y);
for(im1=0;im1<nspec;im1++) {
for(ip=0;ip<bin->n_bands;ip++)
cl_bias[im1][ip]-=cl_dum[im1][ip]*nij;
}
}
}
}
if(fl1->ntemp>0) {
int iti;
for(iti=0;iti<fl1->ntemp;iti++) {
int itj;
for(itj=0;itj<fl1->ntemp;itj++) {
int im1,im2;
double mij=gsl_matrix_get(fl1->matrix_M,iti,itj);
//v*f^j
for(im1=0;im1<fl1->nmaps;im1++)
fs_map_product(fl1->fs,fl1->temp[itj][im1],fl1->mask,map_1_dum[im1]);
//DFT[v*f^j]
fs_map2alm(fl1->fs,1,2*fl1->pol,map_1_dum,alm_1_dum);
//C^abT*DFT[v*f^j]
for(im2=0;im2<fl2->nmaps;im2++) {
fs_zero_alm(fl2->fs,alm_2_dum[im2]);
for(im1=0;im1<fl1->nmaps;im1++)
fs_alter_alm(fl1->fs,-1.,alm_1_dum[im1],alm_2_dum[im2],cl_proposal_f[im1*fl2->nmaps+im2],1);
}
//DFT^-1[C^abT*DFT[v*f^j]]
fs_alm2map(fl2->fs,1,2*fl2->pol,map_2_dum,alm_2_dum);
//DFT[w*DFT^-1[C^abT*DFT[v*f^j]]]
purify_generic_flat(fl2,fl2->mask,fl2->a_mask,map_2_dum,alm_2_dum);
//Sum_m(f^i*DFT[w*DFT^-1[C^abT*DFT[v*f^j]]]^*)/(2l+1)
fs_alm2cl(fl1->fs,bin,fl1->a_temp[iti],alm_2_dum,fl1->pol,fl2->pol,cl_dum,
lmn_x,lmx_x,lmn_y,lmx_y);
for(im1=0;im1<nspec;im1++) {
for(ip=0;ip<bin->n_bands;ip++)
cl_bias[im1][ip]-=cl_dum[im1][ip]*mij;
}
}
}
}
if((fl1->ntemp>0) && (fl2->ntemp>0)) {
int iti,itj,itp,itq,im1,im2;
flouble *mat_prod=my_calloc(fl1->ntemp*fl2->ntemp,sizeof(flouble));
for(itj=0;itj<fl1->ntemp;itj++) {
for(itq=0;itq<fl2->ntemp;itq++) {
//w*g^q
for(im2=0;im2<fl2->nmaps;im2++)
fs_map_product(fl2->fs,fl2->temp[itq][im2],fl2->mask,map_2_dum[im2]);
//DFT[w*g^q]
fs_map2alm(fl2->fs,1,2*fl2->pol,map_2_dum,alm_2_dum);
//C^ab*DFT[w*g^q]
for(im1=0;im1<fl1->nmaps;im1++) {
fs_zero_alm(fl1->fs,alm_1_dum[im1]);
for(im2=0;im2<fl2->nmaps;im2++)
fs_alter_alm(fl2->fs,-1.,alm_2_dum[im2],alm_1_dum[im1],cl_proposal_f[im1*fl2->nmaps+im2],1);
}
//DFT^-1[C^ab*DFT[w*g^q]]
fs_alm2map(fl1->fs,1,2*fl1->pol,map_1_dum,alm_1_dum);
for(im1=0;im1<fl1->nmaps;im1++) {
//v*DFT^-1[C^ab*DFT[w*g^q]]
fs_map_product(fl1->fs,map_1_dum[im1],fl1->mask,map_1_dum[im1]);
//Int[f^jT*v*DFT^-1[C^ab*DFT[w*g^q]]]
mat_prod[itj*fl2->ntemp+itq]+=fs_map_dot(fl1->fs,map_1_dum[im1],fl1->temp[itj][im1]);
}
}
}
for(iti=0;iti<fl1->ntemp;iti++) {
for(itp=0;itp<fl2->ntemp;itp++) {
//Sum_m(f^i*g^p*)/(2l+1)
fs_alm2cl(fl1->fs,bin,fl1->a_temp[iti],fl2->a_temp[itp],fl1->pol,fl2->pol,cl_dum,
lmn_x,lmx_x,lmn_y,lmx_y);
for(itj=0;itj<fl1->ntemp;itj++) {
double mij=gsl_matrix_get(fl1->matrix_M,iti,itj);
for(itq=0;itq<fl2->ntemp;itq++) {
double npq=gsl_matrix_get(fl2->matrix_M,itp,itq);
for(im1=0;im1<nspec;im1++) {
for(ip=0;ip<bin->n_bands;ip++)
cl_bias[im1][ip]+=cl_dum[im1][ip]*mat_prod[itj*fl2->ntemp+itq]*mij*npq;
}
}
}
}
}
free(mat_prod);
}
for(ii=0;ii<fl1->nmaps;ii++) {
dftw_free(map_1_dum[ii]);
dftw_free(alm_1_dum[ii]);
}
free(map_1_dum);
free(alm_1_dum);
for(ii=0;ii<fl2->nmaps;ii++) {
dftw_free(map_2_dum[ii]);
dftw_free(alm_2_dum[ii]);
}
free(map_2_dum);
free(alm_2_dum);
for(ii=0;ii<nspec;ii++) {
free(cl_dum[ii]);
nmt_k_function_free(cl_proposal_f[ii]);
}
free(cl_proposal_f);
free(cl_dum);
return;
}
#ifdef _ENABLE_FLAT_THEORY_ACCURATE
void nmt_couple_cl_l_flat_accurate(nmt_workspace_flat *w,int nl,flouble *larr,flouble **cl_in,flouble **cl_out)
{
//Zero input array
int ii;
for(ii=0;ii<w->ncls;ii++) {
int jj;
for(jj=0;jj<w->bin->n_bands;jj++)
cl_out[ii][jj]=0;
}
//Precompute angles, mode lengths and band indices
flouble *cosarr,*sinarr,*kmodarr;
int *i_band=my_malloc(w->fs->npix*sizeof(int));
flouble *clmaps=my_malloc(w->ncls*w->fs->npix*sizeof(flouble));
nmt_k_function **fcl=my_malloc(w->ncls*sizeof(nmt_k_function *));
for(ii=0;ii<w->ncls;ii++)
fcl[ii]=nmt_k_function_alloc(nl,larr,cl_in[ii],cl_in[ii][0],0.,0);
if(w->ncls>1) {
kmodarr=dftw_malloc(w->fs->npix*sizeof(flouble));
cosarr=dftw_malloc(w->fs->npix*sizeof(flouble));
sinarr=dftw_malloc(w->fs->npix*sizeof(flouble));
}
int *x_out_range,*y_out_range;
x_out_range=my_calloc(w->fs->nx,sizeof(int));
y_out_range=my_calloc(w->fs->ny,sizeof(int));
for(ii=0;ii<w->fs->nx;ii++) {
flouble k;
if(2*ii<=w->fs->nx) k=ii*2*M_PI/w->fs->lx;
else k=-(w->fs->nx-ii)*2*M_PI/w->fs->lx;
if((k<=w->ellcut_x[1]) && (k>=w->ellcut_x[0]))
x_out_range[ii]=1;
}
for(ii=0;ii<w->fs->ny;ii++) {
flouble k;
if(2*ii<=w->fs->ny) k=ii*2*M_PI/w->fs->ly;
else k=-(w->fs->ny-ii)*2*M_PI/w->fs->ly;
if((k<=w->ellcut_y[1]) && (k>=w->ellcut_y[0]))
y_out_range[ii]=1;
}
#pragma omp parallel default(none) \
shared(w,i_band,cosarr,sinarr,kmodarr) \
shared(x_out_range,y_out_range,fcl,clmaps)
{
flouble dkx=2*M_PI/w->fs->lx;
flouble dky=2*M_PI/w->fs->ly;
int iy1,ix1;
gsl_interp_accel *intacc=gsl_interp_accel_alloc();
#pragma omp for
for(iy1=0;iy1<w->fs->ny;iy1++) {
flouble ky;
int ik=0;
if(2*iy1<=w->fs->ny)
ky=iy1*dky;
else
ky=-(w->fs->ny-iy1)*dky;
for(ix1=0;ix1<w->fs->nx;ix1++) {
flouble kx,kmod;
int ix_here,index,ic;
index=ix1+w->fs->nx*iy1;
if(2*ix1<=w->fs->nx) {
kx=ix1*dkx;
ix_here=ix1;
}
else {
kx=-(w->fs->nx-ix1)*dkx;
ix_here=w->fs->nx-ix1;
}
kmod=sqrt(kx*kx+ky*ky);
for(ic=0;ic<w->ncls;ic++)
clmaps[ic+index*w->ncls]=nmt_k_function_eval(fcl[ic],kmod,intacc);
if(y_out_range[iy1] || x_out_range[ix1])
i_band[index]=-1;
else {
int ic;
ik=nmt_bins_flat_search_fast(w->bin,kmod,ik);
if(ik>=0)
i_band[index]=ik;
else
i_band[index]=-1;
if(w->ncls>1) {
flouble c,s;
if(kmod>0) {
c=kx/kmod;
s=ky/kmod;
}
else {
c=1.;
s=0.;
}
kmodarr[index]=kmod;
cosarr[index]=c*c-s*s;
sinarr[index]=2*s*c;
}
}
}
} //end omp for
gsl_interp_accel_free(intacc);
} //end omp parallel
free(x_out_range);
free(y_out_range);
for(ii=0;ii<w->ncls;ii++)
nmt_k_function_free(fcl[ii]);
free(fcl);
#pragma omp parallel default(none) \
shared(i_band,w,cosarr,sinarr,kmodarr,clmaps,cl_out)
{
int iy1,ix1,ix2,iy2;
int pe1=w->pe1,pe2=w->pe2,pb1=w->pb1,pb2=w->pb2;
int pure_any=pe1 || pb1 || pe2 || pb2;
flouble **cl_out_thr=my_malloc(w->ncls*sizeof(flouble *));
for(iy1=0;iy1<w->ncls;iy1++)
cl_out_thr[iy1]=my_calloc(w->bin->n_bands,sizeof(flouble));
#pragma omp for
for(iy1=0;iy1<w->fs->ny;iy1++) {
for(ix1=0;ix1<w->fs->nx;ix1++) {
int index1=ix1+w->fs->nx*iy1;
int ik1=i_band[index1];
if(ik1>=0) {
flouble inv_k1=0;
if((index1>0) && (w->ncls>1))
inv_k1=1./kmodarr[index1];
for(iy2=0;iy2<w->fs->ny;iy2++) {
for(ix2=0;ix2<w->fs->nx;ix2++) {
int index;
flouble mp,cdiff=1,sdiff=0,kr=1;
int index2=ix2+w->fs->nx*iy2;
flouble *cls_in=&(clmaps[w->ncls*index2]);
int iy=iy1-iy2;
int ix=ix1-ix2;
if(iy<0) iy+=w->fs->ny;
if(ix<0) ix+=w->fs->nx;
index=ix+w->fs->nx*iy;
if(w->ncls>1) {
cdiff=cosarr[index1]*cosarr[index2]+sinarr[index1]*sinarr[index2];
sdiff=sinarr[index1]*cosarr[index2]-cosarr[index1]*sinarr[index2];
if((index1==0) && (index2==0))
kr=1;
else
kr=kmodarr[index2]*inv_k1;
kr*=kr;
}
mp=w->maskprod[index];
if(w->ncls==1) {
cl_out_thr[0][ik1]+=mp*cls_in[0];
}
if(w->ncls==2) {
flouble fc[2],fs[2];
fc[0]=cdiff*mp;
fs[0]=sdiff*mp;
if(pure_any) {
fc[1]=kr*mp; fs[1]=0;
}
cl_out_thr[0][ik1]+=fc[pe1+pe2]*cls_in[0]; //TE,TE
cl_out_thr[0][ik1]-=fs[pe1+pe2]*cls_in[1]; //TE,TB
cl_out_thr[1][ik1]+=fs[pb1+pb2]*cls_in[0]; //TB,TE
cl_out_thr[1][ik1]+=fc[pb1+pb2]*cls_in[1]; //TB,TB
}
if(w->ncls==4) {
flouble fc[2],fs[2];
fc[0]=cdiff; fs[0]=sdiff;
if(pure_any) {
fc[1]=kr; fs[1]=0;
}
cl_out_thr[0][ik1]+=fc[pe1]*fc[pe2]*mp*cls_in[0]; //EE,EE
cl_out_thr[0][ik1]-=fc[pe1]*fs[pe2]*mp*cls_in[1]; //EE,EB
cl_out_thr[0][ik1]-=fs[pe1]*fc[pe2]*mp*cls_in[2]; //EE,BE
cl_out_thr[0][ik1]+=fs[pe1]*fs[pe2]*mp*cls_in[3]; //EE,BB
cl_out_thr[1][ik1]+=fc[pe1]*fs[pb2]*mp*cls_in[0]; //EB,EE
cl_out_thr[1][ik1]+=fc[pe1]*fc[pb2]*mp*cls_in[1]; //EB,EB
cl_out_thr[1][ik1]-=fs[pe1]*fs[pb2]*mp*cls_in[2]; //EB,BE
cl_out_thr[1][ik1]-=fs[pe1]*fc[pb2]*mp*cls_in[3]; //EB,BB
cl_out_thr[2][ik1]+=fs[pb1]*fc[pe2]*mp*cls_in[0]; //BE,EE
cl_out_thr[2][ik1]-=fs[pb1]*fs[pe2]*mp*cls_in[1]; //BE,EB
cl_out_thr[2][ik1]+=fc[pb1]*fc[pe2]*mp*cls_in[2]; //BE,BE
cl_out_thr[2][ik1]-=fc[pb1]*fs[pe2]*mp*cls_in[3]; //BE,BB
cl_out_thr[3][ik1]+=fs[pb1]*fs[pb2]*mp*cls_in[0]; //BB,EE
cl_out_thr[3][ik1]+=fs[pb1]*fc[pb2]*mp*cls_in[1]; //BB,EB
cl_out_thr[3][ik1]+=fc[pb1]*fs[pb2]*mp*cls_in[2]; //BB,BE
cl_out_thr[3][ik1]+=fc[pb1]*fc[pb2]*mp*cls_in[3]; //BB,BB
}
}
}
}
}
} //end omp for
#pragma omp critical
{
for(iy1=0;iy1<w->ncls;iy1++) {
for(iy2=0;iy2<w->bin->n_bands;iy2++)
cl_out[iy1][iy2]+=cl_out_thr[iy1][iy2];
}
} //end omp critical
for(iy1=0;iy1<w->ncls;iy1++)
free(cl_out_thr[iy1]);
free(cl_out_thr);
} //end omp parallel
int il1;
flouble fac_norm=4*M_PI*M_PI/(w->fs->lx*w->fs->lx*w->fs->ly*w->fs->ly);
for(il1=0;il1<w->bin->n_bands;il1++) {
int icl1;
flouble norm;
if(w->n_cells[il1]>0)
norm=fac_norm/w->n_cells[il1];
else
norm=0;
for(icl1=0;icl1<w->ncls;icl1++)
cl_out[icl1][il1]*=norm;
}
free(i_band);
free(clmaps);
if(w->ncls>1) {
dftw_free(kmodarr);
dftw_free(cosarr);
dftw_free(sinarr);
}
}
#endif //_ENABLE_FLAT_THEORY_ACCURATE
void nmt_couple_cl_l_flat_fast(nmt_workspace_flat *w,int nl,flouble *larr,flouble **cl_in,flouble **cl_out)
{
int ii;
flouble *cl_in_rings=my_calloc(w->ncls*w->fs->n_ell,sizeof(flouble));
int *n_cells=my_calloc(w->fs->n_ell,sizeof(int));
nmt_k_function **fcl=my_malloc(w->ncls*sizeof(nmt_k_function *));
for(ii=0;ii<w->ncls;ii++)
fcl[ii]=nmt_k_function_alloc(nl,larr,cl_in[ii],cl_in[ii][0],0.,0);
//Interpolate input power spectrum onto grid and bin into rings
#pragma omp parallel default(none) \
shared(w,fcl,cl_in_rings,n_cells)
{
int iy1,ix1;
flouble dkx=2*M_PI/w->fs->lx;
flouble dky=2*M_PI/w->fs->ly;
flouble *cl_in_rings_thr=my_calloc(w->ncls*w->fs->n_ell,sizeof(flouble));
int *n_cells_thr=my_calloc(w->fs->n_ell,sizeof(int));
gsl_interp_accel *intacc=gsl_interp_accel_alloc();
#pragma omp for
for(iy1=0;iy1<w->fs->ny;iy1++) {
flouble ky;
if(2*iy1<=w->fs->ny)
ky=iy1*dky;
else
ky=-(w->fs->ny-iy1)*dky;
for(ix1=0;ix1<w->fs->nx;ix1++) {
flouble kx,kmod;
int ir;
if(2*ix1<=w->fs->nx)
kx=ix1*dkx;
else
kx=-(w->fs->nx-ix1)*dkx;
kmod=sqrt(kx*kx+ky*ky);
ir=(int)(kmod*w->fs->i_dell);
if(ir<w->fs->n_ell) {
int ic,ind0=ir*w->ncls;
n_cells_thr[ir]++;
for(ic=0;ic<w->ncls;ic++)
cl_in_rings_thr[ind0+ic]+=nmt_k_function_eval(fcl[ic],kmod,intacc);
}
}
} //end omp for
#pragma omp critical
{
for(iy1=0;iy1<w->fs->n_ell;iy1++)
n_cells[iy1]+=n_cells_thr[iy1];
for(iy1=0;iy1<w->fs->n_ell*w->ncls;iy1++)
cl_in_rings[iy1]+=cl_in_rings_thr[iy1];
} //end omp critical
free(cl_in_rings_thr);
free(n_cells_thr);
gsl_interp_accel_free(intacc);
} //end omp parallel
for(ii=0;ii<w->fs->n_ell;ii++) {
int ic;
for(ic=0;ic<w->ncls;ic++) {
if(n_cells[ii]>0)
cl_in_rings[ii*w->ncls+ic]/=n_cells[ii];
}
}
//Convolve with mode-coupling matrix
for(ii=0;ii<w->ncls;ii++) {
int i1;
for(i1=0;i1<w->bin->n_bands;i1++) {
int ind2,ind1=i1*w->ncls+ii;
cl_out[ii][i1]=0;
for(ind2=0;ind2<w->ncls*w->fs->n_ell;ind2++)
cl_out[ii][i1]+=w->coupling_matrix_unbinned[ind1][ind2]*cl_in_rings[ind2];
}
}
//Free up
free(cl_in_rings);
free(n_cells);
for(ii=0;ii<w->ncls;ii++)
nmt_k_function_free(fcl[ii]);
free(fcl);
}
void nmt_couple_cl_l_flat_quick(nmt_workspace_flat *w,int nl,flouble *larr,flouble **cl_in,flouble **cl_out)
{
int ii;
flouble **cell_in=my_malloc(w->ncls*sizeof(flouble *));
gsl_interp_accel *intacc=gsl_interp_accel_alloc();
for(ii=0;ii<w->ncls;ii++) {
nmt_k_function *fcl=nmt_k_function_alloc(nl,larr,cl_in[ii],cl_in[ii][0],0.,0);
cell_in[ii]=my_calloc(w->bin->n_bands,sizeof(flouble));
int iy;
flouble dkx=2*M_PI/w->fs->lx;
flouble dky=2*M_PI/w->fs->ly;
for(iy=0;iy<w->fs->ny;iy++) {
flouble ky;
int ik=0;
if(2*iy<=w->fs->ny)
ky=iy*dky;
else
ky=-(w->fs->ny-iy)*dky;
if((ky>w->ellcut_y[1]) || (ky<w->ellcut_y[0])) {
int ix;
for(ix=0;ix<w->fs->nx;ix++) {
flouble kx;
if(2*ix<=w->fs->nx)
kx=ix*dkx;
else
kx=-(w->fs->nx-ix)*dkx;
if((kx>w->ellcut_x[1]) || (kx<w->ellcut_x[0])) {
double kmod=sqrt(kx*kx+ky*ky);
ik=nmt_bins_flat_search_fast(w->bin,kmod,ik);
if(ik>=0)
cell_in[ii][ik]+=nmt_k_function_eval(fcl,kmod,intacc);
}
}
}
}
for(iy=0;iy<w->bin->n_bands;iy++) {
if(w->n_cells[iy]>0)
cell_in[ii][iy]/=w->n_cells[iy];
else
cell_in[ii][iy]=0;
}
nmt_k_function_free(fcl);
}
gsl_interp_accel_free(intacc);
int icl1;
for(icl1=0;icl1<w->ncls;icl1++) {
int i1;
for(i1=0;i1<w->bin->n_bands;i1++) {
int icl2;
int ind1=i1*w->ncls+icl1;
cl_out[icl1][i1]=0;
for(icl2=0;icl2<w->ncls;icl2++) {
int i2;
for(i2=0;i2<w->bin->n_bands;i2++) {
int ind2=i2*w->ncls+icl2;
cl_out[icl1][i1]+=w->coupling_matrix_binned[ind1][ind2]*cell_in[icl2][i2];
}
}
}
}
for(ii=0;ii<w->ncls;ii++)
free(cell_in[ii]);
free(cell_in);
}
void nmt_decouple_cl_l_flat(nmt_workspace_flat *w,flouble **cl_in,flouble **cl_noise_in,
flouble **cl_bias,flouble **cl_out)
{
int icl,ib2;
gsl_vector *dl_map_bad_b=gsl_vector_alloc(w->ncls*w->bin->n_bands);
gsl_vector *dl_map_good_b=gsl_vector_alloc(w->ncls*w->bin->n_bands);
//Bin coupled power spectrum
for(icl=0;icl<w->ncls;icl++) {
for(ib2=0;ib2<w->bin->n_bands;ib2++) {
gsl_vector_set(dl_map_bad_b,w->ncls*ib2+icl,
cl_in[icl][ib2]-cl_noise_in[icl][ib2]-cl_bias[icl][ib2]);
}
}
gsl_linalg_LU_solve(w->coupling_matrix_binned_gsl,w->coupling_matrix_perm,dl_map_bad_b,dl_map_good_b);
for(icl=0;icl<w->ncls;icl++) {
for(ib2=0;ib2<w->bin->n_bands;ib2++)
cl_out[icl][ib2]=gsl_vector_get(dl_map_good_b,w->ncls*ib2+icl);
}
gsl_vector_free(dl_map_bad_b);
gsl_vector_free(dl_map_good_b);
}
void nmt_compute_coupled_cell_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,flouble **cl_out,
flouble lmn_x,flouble lmx_x,flouble lmn_y,flouble lmx_y)
{
if(check_flatsky_infos(fl1->fs,fl2->fs))
report_error(NMT_ERROR_CONSISTENT_RESO,"Can only correlate fields defined on the same pixels!\n");
fs_alm2cl(fl1->fs,bin,fl1->alms,fl2->alms,fl1->pol,fl2->pol,cl_out,lmn_x,lmx_x,lmn_y,lmx_y);
}
nmt_workspace_flat *nmt_compute_power_spectra_flat(nmt_field_flat *fl1,nmt_field_flat *fl2,
nmt_binning_scheme_flat *bin,
flouble lmn_x,flouble lmx_x,
flouble lmn_y,flouble lmx_y,
nmt_workspace_flat *w0,flouble **cl_noise,
int nl_prop,flouble *l_prop,flouble **cl_prop,
flouble **cl_out)
{
int ii;
flouble **cl_bias,**cl_data;
nmt_workspace_flat *w;
if(w0==NULL)
w=nmt_compute_coupling_matrix_flat(fl1,fl2,bin,lmn_x,lmx_x,lmn_y,lmx_y);
else {
w=w0;
if((check_flatsky_infos(fl1->fs,w->fs)) || (check_flatsky_infos(fl2->fs,w->fs)))
report_error(NMT_ERROR_CONSISTENT_RESO,"Input workspace has different pixels!\n");
if(bin->n_bands!=w->bin->n_bands)
report_error(NMT_ERROR_CONSISTENT_RESO,"Input workspace has different bandpowers!\n");
}
cl_bias=my_malloc(w->ncls*sizeof(flouble *));
cl_data=my_malloc(w->ncls*sizeof(flouble *));
for(ii=0;ii<w->ncls;ii++) {
cl_bias[ii]=my_calloc(w->bin->n_bands,sizeof(flouble));
cl_data[ii]=my_calloc(w->bin->n_bands,sizeof(flouble));
}
nmt_compute_coupled_cell_flat(fl1,fl2,bin,cl_data,lmn_x,lmx_x,lmn_y,lmx_y);
nmt_compute_deprojection_bias_flat(fl1,fl2,bin,lmn_x,lmx_x,lmn_y,lmx_y,
nl_prop,l_prop,cl_prop,cl_bias);
nmt_decouple_cl_l_flat(w,cl_data,cl_noise,cl_bias,cl_out);
for(ii=0;ii<w->ncls;ii++) {
free(cl_bias[ii]);
free(cl_data[ii]);
}
free(cl_bias);
free(cl_data);
return w;
}
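/* Editor's note: a hedged sketch of the intended calling pattern for the
 * function above: the first call builds the workspace, later calls reuse
 * it. Field/binning construction happens elsewhere in NaMaster; passing
 * an empty cut interval (lmn>lmx) is assumed to disable the ell cuts,
 * per the range test in nmt_compute_coupling_matrix_flat. */
static nmt_workspace_flat *power_spectra_sketch(nmt_field_flat *f1,nmt_field_flat *f2,
  nmt_binning_scheme_flat *bin,nmt_workspace_flat *w,
  flouble **cl_noise,int nl,flouble *larr,
  flouble **cl_prop,flouble **cl_out)
{
  return nmt_compute_power_spectra_flat(f1,f2,bin,1.,-1.,1.,-1.,
    w,cl_noise,nl,larr,cl_prop,cl_out);
}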
|
RecordTable.h | /*
* Souffle - A Datalog Compiler
* Copyright (c) 2020, The Souffle Developers. All rights reserved.
* Licensed under the Universal Permissive License v 1.0 as shown at:
* - https://opensource.org/licenses/UPL
* - <souffle root>/licenses/SOUFFLE-UPL.txt
*/
/************************************************************************
*
* @file RecordTable.h
*
* Data container implementing a map between records and their references.
* Records are separated by arity, i.e., stored in different RecordMaps.
*
***********************************************************************/
#pragma once
#include "CompiledTuple.h"
#include "ParallelUtils.h"
#include "RamTypes.h"
#include <cassert>
#include <iostream>
#include <limits>
#include <map>
#include <unordered_map>
#include <vector>
namespace souffle {
/** @brief Bidirectional mapping between records and record references */
class RecordMap {
/** arity of record */
const size_t arity;
/** hash function for unordered record map */
struct RecordHash {
std::size_t operator()(const std::vector<RamDomain>& record) const {
std::size_t seed = 0;
std::hash<RamDomain> domainHash;
for (RamDomain value : record) {
seed ^= domainHash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
return seed;
}
};
/** map from records to references */
// TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal
std::unordered_map<std::vector<RamDomain>, RamDomain, RecordHash> recordToIndex;
/** array of records; index represents record reference */
// TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal
std::vector<std::vector<RamDomain>> indexToRecord;
public:
explicit RecordMap(size_t arity) : arity(arity), indexToRecord(1) {} // note: index 0 element left free
/** @brief converts record to a record reference */
// TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal
RamDomain pack(const std::vector<RamDomain>& vector) {
RamDomain index;
#pragma omp critical(record_pack)
{
auto pos = recordToIndex.find(vector);
if (pos != recordToIndex.end()) {
index = pos->second;
} else {
#pragma omp critical(record_unpack)
{
indexToRecord.push_back(vector);
index = indexToRecord.size() - 1;
recordToIndex[vector] = index;
// assert that new index is smaller than the range
assert(index != std::numeric_limits<RamDomain>::max());
}
}
}
return index;
}
/** @brief convert record pointer to a record reference */
RamDomain pack(const RamDomain* tuple) {
// TODO (b-scholz): data is unnecessarily copied
// for a successful lookup. To avoid this, we should
// compute a hash of the pointer-array and traverse through
// the bucket list of the unordered map finding the record.
// Note that in case of non-existence, the record still needs to be
// copied for the newly created entry but this will be the less
// frequent case.
std::vector<RamDomain> tmp(arity);
for (size_t i = 0; i < arity; i++) {
tmp[i] = tuple[i];
}
return pack(tmp);
}
/** @brief convert record reference to a record pointer */
const RamDomain* unpack(RamDomain index) const {
const RamDomain* res;
#pragma omp critical(record_unpack)
res = indexToRecord[index].data();
return res;
}
};
class RecordTable {
public:
RecordTable() = default;
virtual ~RecordTable() = default;
/** @brief convert record to record reference */
RamDomain pack(RamDomain* tuple, size_t arity) {
return lookupArity(arity).pack(tuple);
}
/** @brief convert record reference to a record */
const RamDomain* unpack(RamDomain ref, size_t arity) const {
auto iter = maps.find(arity);
assert(iter != maps.end() && "Attempting to unpack non-existing record");
return (iter->second).unpack(ref);
}
private:
/** @brief lookup RecordMap for a given arity; if it does not exist, create new RecordMap */
RecordMap& lookupArity(size_t arity) {
std::unordered_map<size_t, RecordMap>::iterator mapsIterator;
#pragma omp critical(RecordTableGetForArity)
{
// This will create a new map if it doesn't exist yet.
mapsIterator = maps.emplace(arity, arity).first;
}
return mapsIterator->second;
}
/** Arity/RecordMap association */
std::unordered_map<size_t, RecordMap> maps;
};
/** @brief helper to convert tuple to record reference for the synthesiser */
template <std::size_t Arity>
inline RamDomain pack(RecordTable& recordTab, Tuple<RamDomain, Arity> tuple) {
return recordTab.pack(static_cast<RamDomain*>(tuple.data), Arity);
}
} // namespace souffle
|
GB_unop__abs_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_fp64_fc64
// op(A') function: GB_unop_tran__abs_fp64_fc64
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = cabs (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cabs (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = cabs (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__abs_fp64_fc64
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = cabs (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
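// Editor's note: a hedged usage sketch of the kernel above, called directly
// on a tiny array (inside GraphBLAS it is dispatched via the operator
// tables). GxB_FC64_t is assumed to be C99 'double complex'.
#include <complex.h>
static GrB_Info demo_abs_fc64 (void)
{
    GxB_FC64_t Ax [3] ;
    double Cx [3] ;
    Ax [0] = 3.0 + 4.0*I ;      // |3+4i| = 5
    Ax [1] = 0.0 - 1.0*I ;      // |-i|   = 1
    Ax [2] = -2.0 + 0.0*I ;     // |-2|   = 2
    return (GB_unop_apply__abs_fp64_fc64 (Cx, Ax, 3, 1)) ;
}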
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_fp64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_uint32
// op(A') function: GB_unop_tran__identity_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int64_uint32
(
int64_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
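// Editor's note: a hedged sketch of the bitmap path above: entries whose
// Ab flag is zero are skipped, the rest are typecast uint32_t -> int64_t.
static GrB_Info demo_identity_bitmap (void)
{
    uint32_t Ax [3] = { 7, 8, 9 } ;
    int8_t   Ab [3] = { 1, 0, 1 } ;     // entry 1 is absent
    int64_t  Cx [3] = { -1, -1, -1 } ;  // Cx [1] is left untouched
    return (GB_unop_apply__identity_int64_uint32 (Cx, Ax, Ab, 3, 1)) ;
}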
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
/************************************************
* options
************************************************/
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
int size = 0;
size += sizeof(ocp_nlp_sqp_opts);
size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
size += config->regularize->opts_calculate_size();
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
return size;
}
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_opts);
opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
opts->regularize = config->regularize->opts_assign(c_ptr);
c_ptr += config->regularize->opts_calculate_size();
// dynamics
opts->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
opts->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
opts->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
opts->constraints[ii] =
constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);
return opts;
}
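// Illustrative usage of the calculate_size/assign pattern above (a sketch,
// not part of this file's API): the caller allocates one raw chunk and
// assign() lays out the options of all submodules inside it:
//   int size = ocp_nlp_sqp_opts_calculate_size(config, dims);
//   void *raw = malloc(size);
//   ocp_nlp_sqp_opts *opts = ocp_nlp_sqp_opts_assign(config, dims, raw);
//   ocp_nlp_sqp_opts_initialize_default(config, dims, opts);
// A single free(raw) then releases everything at once.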
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
ocp_nlp_reg_config *regularize = config->regularize;
int ii;
int N = dims->N;
// SQP opts
opts->max_iter = 20;
opts->tol_stat = 1e-8;
opts->tol_eq = 1e-8;
opts->tol_ineq = 1e-8;
opts->tol_comp = 1e-8;
opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
opts->num_threads = ACADOS_NUM_THREADS;
#endif
opts->ext_qp_res = 0;
opts->qp_warm_start = 0;
opts->warm_start_first_qp = false;
opts->step_length = 1.0;
// submodules opts
// qp solver
qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// overwrite default
qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_comp", &opts->tol_comp);
// regularization
regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii],
opts->constraints[ii]);
}
return;
}
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
return;
}
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
ocp_nlp_config *config = config_;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value);
if (!strcmp(field, "qp_warm_start"))
{
int* i_ptr = (int *) value;
opts->qp_warm_start = *i_ptr;
}
}
else // nlp opts
{
if (!strcmp(field, "max_iter"))
{
int* max_iter = (int *) value;
opts->max_iter = *max_iter;
}
else if (!strcmp(field, "reuse_workspace"))
{
int* reuse_workspace = (int *) value;
opts->reuse_workspace = *reuse_workspace;
}
else if (!strcmp(field, "num_threads"))
{
int* num_threads = (int *) value;
opts->num_threads = *num_threads;
}
else if (!strcmp(field, "tol_stat"))
{
double* tol_stat = (double *) value;
opts->tol_stat = *tol_stat;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_stat", value);
}
else if (!strcmp(field, "tol_eq"))
{
double* tol_eq = (double *) value;
opts->tol_eq = *tol_eq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_eq", value);
}
else if (!strcmp(field, "tol_ineq"))
{
double* tol_ineq = (double *) value;
opts->tol_ineq = *tol_ineq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_ineq", value);
}
else if (!strcmp(field, "tol_comp"))
{
double* tol_comp = (double *) value;
opts->tol_comp = *tol_comp;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_comp", value);
}
else if (!strcmp(field, "exact_hess"))
{
int N = config->N;
// cost
for (ii=0; ii<=N; ii++)
config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
// dynamics
for (ii=0; ii<N; ii++)
config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
// constraints: TODO disabled for now as it prevents convergence !!!
// for (ii=0; ii<=N; ii++)
// config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
}
else if (!strcmp(field, "ext_qp_res"))
{
int* ext_qp_res = (int *) value;
opts->ext_qp_res = *ext_qp_res;
}
else if (!strcmp(field, "step_length"))
{
double* step_length = (double *) value;
opts->step_length = *step_length;
}
else if (!strcmp(field, "warm_start_first_qp"))
{
bool* warm_start_first_qp = (bool *) value;
opts->warm_start_first_qp = *warm_start_first_qp;
}
else
{
printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field);
exit(1);
}
}
return;
}
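// Example of the field routing above (illustrative): "qp_tol_stat" is split
// at the first '_' into module "qp" and remainder "tol_stat", which is
// forwarded to the QP solver ("qp_warm_start" is additionally cached in
// opts->qp_warm_start). A prefix that is not a known module, e.g. "max" in
// "max_iter", falls through to the NLP options handled directly above.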
void ocp_nlp_sqp_dynamics_opts_set(void *config_, void *opts_, int stage,
const char *field, void *value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_dynamics_config *dyn_config = config->dynamics[stage];
dyn_config->opts_set(dyn_config, opts->dynamics[stage], field, value);
return;
}
void ocp_nlp_sqp_cost_opts_set(void *config_, void *opts_, int stage,
const char *field, void *value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_cost_config *cost_config = config->cost[stage];
cost_config->opts_set(cost_config, opts->cost[stage], field, value);
return;
}
void ocp_nlp_sqp_constraints_opts_set(void *config_, void *opts_, int stage,
const char *field, void *value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_constraints_config *constraints_config = config->constraints[stage];
constraints_config->opts_set(constraints_config, opts->constraints[stage], (char *) field, value);
return;
}
/************************************************
* memory
************************************************/
int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
int *nx = dims->nx;
int *nu = dims->nu;
int *nz = dims->nz;
int size = 0;
size += sizeof(ocp_nlp_sqp_memory);
// qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// qp solver
size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
opts->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
opts->constraints[ii]);
}
// nlp res
size += ocp_nlp_res_calculate_size(dims);
// nlp mem
size += ocp_nlp_memory_calculate_size(config, dims);
// stat
int stat_m = opts->max_iter+1;
int stat_n = 6;
if (opts->ext_qp_res)
stat_n += 4;
size += stat_n*stat_m*sizeof(double);
// dzduxt
size += (N+1)*sizeof(struct blasfeo_dmat);
for (ii=0; ii<=N; ii++)
size += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);
// z_alg
size += (N+1)*sizeof(struct blasfeo_dvec);
for (ii=0; ii<=N; ii++)
size += blasfeo_memsize_dvec(nz[ii]);
size += 1*8; // blasfeo_str align
size += 1*64; // blasfeo_mem align
size += 8; // initial align
// make_int_multiple_of(64, &size);
return size;
}
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
char *c_ptr = (char *) raw_memory;
int N = dims->N;
int *nx = dims->nx;
int *nu = dims->nu;
int *nz = dims->nz;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_memory);
// qp in
mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// QP solver
mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr);
c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
// nlp res
mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
c_ptr += mem->nlp_res->memsize;
// nlp mem
mem->nlp_mem = ocp_nlp_memory_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_memory_calculate_size(config, dims);
// dynamics
mem->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii],
opts->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii],
opts->dynamics[ii]);
}
// cost
mem->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
mem->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
mem->constraints[ii] = constraints[ii]->memory_assign(
constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr);
c_ptr += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii],
opts->constraints[ii]);
}
// stat
mem->stat = (double *) c_ptr;
mem->stat_m = opts->max_iter+1;
mem->stat_n = 6;
if (opts->ext_qp_res)
mem->stat_n += 4;
c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
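// layout of stat (one row per SQP iteration): columns 0-3 hold the
// stationarity, equality, inequality and complementarity residuals, columns
// 4-5 the QP solver status and iteration count (stored on the row of the
// next SQP iteration), and, if ext_qp_res is set, columns 6-9 the external
// QP residuals; see ocp_nlp_sqp below.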
// blasfeo_str align
align_char_to(8, &c_ptr);
// dzduxt
mem->dzduxt = (struct blasfeo_dmat *) c_ptr;
c_ptr += (N+1)*sizeof(struct blasfeo_dmat);
// z_alg
mem->z_alg = (struct blasfeo_dvec *) c_ptr;
c_ptr += (N+1)*sizeof(struct blasfeo_dvec);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// dzduxt
for (int ii=0; ii<=N; ii++)
{
blasfeo_create_dmat(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, c_ptr);
c_ptr += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);
}
// z_alg
for (int ii=0; ii<=N; ii++)
{
blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
c_ptr += blasfeo_memsize_dvec(nz[ii]);
}
mem->status = ACADOS_READY;
assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);
return mem;
}
/************************************************
* workspace
************************************************/
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
int size = 0;
int size_tmp = 0;
int tmp;
// sqp
size += sizeof(ocp_nlp_sqp_work);
// tmp qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// array of pointers
// cost
size += (N + 1) * sizeof(void *);
// dynamics
size += N * sizeof(void *);
// constraints
size += (N + 1) * sizeof(void *);
if (opts->ext_qp_res)
{
// qp res
size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii],
dims->constraints[ii], opts->constraints[ii]);
}
#else
// qp solver
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (ii = 0; ii < N; ii++)
{
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (ii = 0; ii <= N; ii++)
{
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (ii = 0; ii <= N; ii++)
{
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
size += size_tmp;
#endif
}
else
{
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii],
dims->constraints[ii], opts->constraints[ii]);
}
}
return size;
}
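// Note on reuse_workspace (summary of the branches above): without OpenMP the
// submodule workspaces are never live at the same time, so one shared buffer
// of the maximum required size suffices; with OpenMP the stage workspaces are
// used concurrently, so each submodule gets its own slice and the sizes are
// summed.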
static void ocp_nlp_sqp_cast_workspace(void *config_, ocp_nlp_dims *dims, ocp_nlp_sqp_work *work,
ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_opts *opts)
{
ocp_nlp_config *config = (ocp_nlp_config *) config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
// sqp
char *c_ptr = (char *) work;
c_ptr += sizeof(ocp_nlp_sqp_work);
// tmp qp in
work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// array of pointers
//
work->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
//
work->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
//
work->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
if (opts->ext_qp_res)
{
// qp res
work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
dims->constraints[ii], opts->constraints[ii]);
}
#else
int size_tmp = 0;
int tmp;
// qp solver
work->qp_work = (void *) c_ptr;
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
c_ptr += size_tmp;
#endif
}
else
{
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii],
opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii],
dims->constraints[ii], opts->constraints[ii]);
}
}
assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return;
}
/************************************************
* functions
************************************************/
static void initialize_qp(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
ocp_nlp_sqp_work *work)
{
ocp_nlp_config *config = (ocp_nlp_config *) config_;
int ii;
int N = dims->N;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (ii = 0; ii <= N; ii++)
{
// cost
config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], nlp_in->cost[ii],
opts->cost[ii], mem->cost[ii], work->cost[ii]);
// dynamics
if (ii < N)
config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], opts->dynamics[ii],
mem->dynamics[ii], work->dynamics[ii]);
// constraints
config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii],
nlp_in->constraints[ii], opts->constraints[ii],
mem->constraints[ii], work->constraints[ii]);
}
return;
}
static void linearize_update_qp_matrices(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts,
ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work)
{
ocp_nlp_config *config = (ocp_nlp_config *) config_;
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
/* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// init Hessian to 0
blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);
// dynamics
if (i < N)
config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
nlp_in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
// cost
config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], nlp_in->cost[i],
opts->cost[i], mem->cost[i], work->cost[i]);
// constraints
config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
nlp_in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
}
/* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i <= N; i++)
{
// nlp mem: cost_grad
struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
blasfeo_dveccp(nv[i], cost_grad, 0, nlp_mem->cost_grad + i, 0);
// nlp mem: dyn_fun
if (i < N)
{
struct blasfeo_dvec *dyn_fun
= config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
blasfeo_dveccp(nx[i + 1], dyn_fun, 0, nlp_mem->dyn_fun + i, 0);
}
// nlp mem: dyn_adj
if (i < N)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, nlp_mem->dyn_adj + i, 0);
}
else
{
blasfeo_dvecse(nu[N] + nx[N], 0.0, nlp_mem->dyn_adj + N, 0);
}
if (i > 0)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], nlp_mem->dyn_adj+i, nu[i],
nlp_mem->dyn_adj+i, nu[i]);
}
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
blasfeo_dveccp(2 * ni[i], ineq_fun, 0, nlp_mem->ineq_fun + i, 0);
// nlp mem: ineq_adj
struct blasfeo_dvec *ineq_adj =
config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
blasfeo_dveccp(nv[i], ineq_adj, 0, nlp_mem->ineq_adj + i, 0);
}
for (i = 0; i <= N; i++)
{
// TODO(rien) where should the update happen??? move to qp update ???
// TODO(all): fix and move where appropriate
// if (i<N)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme != NULL && opts->scheme->type != exact)
// {
// for (int_t j = 0; j < nx; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
// for (int_t j = 0; j < nu; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
// }
// }
}
return;
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
static void sqp_update_qp_vectors(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts,
ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// g
blasfeo_dveccp(nv[i], nlp_mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);
// b
if (i < N)
blasfeo_dveccp(nx[i + 1], nlp_mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);
// d
blasfeo_dveccp(2 * ni[i], nlp_mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
}
return;
}
static void sqp_update_variables(void *config_, ocp_nlp_dims *dims, ocp_nlp_out *nlp_out,
ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem,
ocp_nlp_sqp_work *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
// ocp_nlp_config *config = (ocp_nlp_config *) config_;
double alpha = opts->step_length;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// (full) step in primal variables
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, nlp_out->ux + i, 0, nlp_out->ux + i, 0);
// update dual variables
if (i < N)
{
blasfeo_dvecsc(nx[i+1], 1.0-alpha, nlp_out->pi+i, 0);
blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, nlp_out->pi+i, 0, nlp_out->pi+i, 0);
}
blasfeo_dvecsc(2*ni[i], 1.0-alpha, nlp_out->lam+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, nlp_out->lam+i, 0, nlp_out->lam+i, 0);
// update slack values
blasfeo_dvecsc(2*ni[i], 1.0-alpha, nlp_out->t+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, nlp_out->t+i, 0, nlp_out->t+i, 0);
// linear update of algebraic variables using state and input sensitivity
if (i < N)
{
blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0,
1.0, mem->z_alg+i, 0, nlp_out->z+i, 0);
}
}
return;
}
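// Summary of the update above, with step length alpha = opts->step_length:
//   primal: ux <- ux + alpha * ux_qp (the QP is solved in delta form)
//   dual (pi, lam) and slacks (t): v <- (1 - alpha) * v + alpha * v_qp
// i.e. primal variables take a damped full step while dual variables are
// blended towards the QP multipliers.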
// Simple fixed-step Gauss-Newton based SQP routine
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
acados_timer timer0, timer1;
acados_tic(&timer0);
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
ocp_nlp_out *nlp_out = nlp_out_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_work *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);
// zero timers
double total_time = 0.0;
mem->time_qp_sol = 0.0;
mem->time_lin = 0.0;
mem->time_reg = 0.0;
mem->time_tot = 0.0;
int N = dims->N;
int ii;
int qp_iter = 0;
int qp_status = 0;
#if defined(ACADOS_WITH_OPENMP)
// backup number of threads
int num_threads_bkp = omp_get_num_threads();
// set number of threads
omp_set_num_threads(opts->num_threads);
#pragma omp parallel
{ // beginning of parallel region
#endif
// alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->dynamics[ii]);
config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, mem->dynamics[ii]);
config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, mem->dynamics[ii]);
config->dynamics[ii]->memory_set_BAbt_ptr(mem->qp_in->BAbt+ii, mem->dynamics[ii]);
config->dynamics[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq+ii, mem->dynamics[ii]);
config->dynamics[ii]->memory_set_dzduxt_ptr(mem->dzduxt+ii, mem->dynamics[ii]);
config->dynamics[ii]->memory_set_sim_guess_ptr(mem->nlp_mem->sim_guess+ii,
mem->nlp_mem->set_sim_guess+ii, mem->dynamics[ii]);
config->dynamics[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->dynamics[ii]);
}
// alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->cost[ii]->memory_set_ux_ptr(nlp_out->ux + ii, mem->cost[ii]);
config->cost[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->cost[ii]);
config->cost[ii]->memory_set_dzdux_tran_ptr(mem->dzduxt+ii, mem->cost[ii]);
config->cost[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq + ii, mem->cost[ii]);
config->cost[ii]->memory_set_Z_ptr(mem->qp_in->Z + ii, mem->cost[ii]);
}
// alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->constraints[ii]);
config->constraints[ii]->memory_set_z_alg_ptr(mem->z_alg+ii, mem->constraints[ii]);
config->constraints[ii]->memory_set_dzdux_tran_ptr(mem->dzduxt+ii, mem->constraints[ii]);
config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, mem->constraints[ii]);
config->constraints[ii]->memory_set_DCt_ptr(mem->qp_in->DCt+ii, mem->constraints[ii]);
config->constraints[ii]->memory_set_RSQrq_ptr(mem->qp_in->RSQrq+ii, mem->constraints[ii]);
config->constraints[ii]->memory_set_idxb_ptr(mem->qp_in->idxb[ii], mem->constraints[ii]);
config->constraints[ii]->memory_set_idxs_ptr(mem->qp_in->idxs[ii], mem->constraints[ii]);
}
// alias to regularize memory
config->regularize->memory_set_RSQrq_ptr(dims->regularize, mem->qp_in->RSQrq, mem->regularize_mem);
config->regularize->memory_set_rq_ptr(dims->regularize, mem->qp_in->rqz, mem->regularize_mem);
config->regularize->memory_set_BAbt_ptr(dims->regularize, mem->qp_in->BAbt, mem->regularize_mem);
config->regularize->memory_set_b_ptr(dims->regularize, mem->qp_in->b, mem->regularize_mem);
config->regularize->memory_set_idxb_ptr(dims->regularize, mem->qp_in->idxb, mem->regularize_mem);
config->regularize->memory_set_DCt_ptr(dims->regularize, mem->qp_in->DCt, mem->regularize_mem);
config->regularize->memory_set_ux_ptr(dims->regularize, mem->qp_out->ux, mem->regularize_mem);
config->regularize->memory_set_pi_ptr(dims->regularize, mem->qp_out->pi, mem->regularize_mem);
config->regularize->memory_set_lam_ptr(dims->regularize, mem->qp_out->lam, mem->regularize_mem);
// copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
// NOTE(oj): this will lead to an error for irk_gnsf, T must be set in precompute;
// -> remove here and make sure precompute is called everywhere (e.g. Python interface).
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
}
#if defined(ACADOS_WITH_OPENMP)
} // end of parallel region
#endif
// initialize QP
initialize_qp(config, dims, nlp_in, nlp_out, opts, mem, work);
// main sqp loop
int sqp_iter = 0;
for (; sqp_iter < opts->max_iter; sqp_iter++)
{
// printf("\n------- sqp iter %d (max_iter %d) --------\n", sqp_iter, opts->max_iter);
// if (sqp_iter==2)
// exit(1);
// linearize NLP and update QP matrices
acados_tic(&timer1);
linearize_update_qp_matrices(config, dims, nlp_in, nlp_out, opts, mem, work);
mem->time_lin += acados_toc(&timer1);
// update QP rhs for SQP (step prim var, abs dual var)
sqp_update_qp_vectors(config, dims, nlp_in, nlp_out, opts, mem, work);
// compute nlp residuals
ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, mem->nlp_mem);
nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
mem->nlp_res->inf_norm_res_b :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
mem->nlp_res->inf_norm_res_d :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
mem->nlp_res->inf_norm_res_m :
nlp_out->inf_norm_res;
// save statistics
if (sqp_iter < mem->stat_m)
{
mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
}
// exit conditions on residuals
if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &&
(mem->nlp_res->inf_norm_res_b < opts->tol_eq) &&
(mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &&
(mem->nlp_res->inf_norm_res_m < opts->tol_comp))
{
// printf("%d sqp iterations\n", sqp_iter);
// print_ocp_qp_in(mem->qp_in);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
nlp_out->total_time = total_time;
mem->time_tot = total_time;
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_SUCCESS;
return mem->status;
}
// regularize Hessian
acados_tic(&timer1);
config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->regularize, mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// printf("\n------- qp_in (sqp iter %d) --------\n", sqp_iter);
// print_ocp_qp_in(mem->qp_in);
// if (sqp_iter==1)
// exit(1);
// (typically) no warm start at first iteration
if (sqp_iter == 0 && !opts->warm_start_first_qp)
{
int tmp_int = 0;
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &tmp_int);
}
// solve qp
acados_tic(&timer1);
qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, mem->qp_in, mem->qp_out,
opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);
mem->time_qp_sol += acados_toc(&timer1);
// compute correct dual solution in case of Hessian regularization
acados_tic(&timer1);
config->regularize->correct_dual_sol(config->regularize, dims->regularize,
opts->regularize, mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// restore default warm start
if (sqp_iter==0)
{
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "warm_start", &opts->qp_warm_start);
}
// TODO move into QP solver memory ???
qp_info *qp_info_;
ocp_qp_out_get(mem->qp_out, "qp_info", &qp_info_);
nlp_out->qp_iter = qp_info_->num_iter;
// printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
qp_iter = qp_info_->num_iter;
// save statistics of last qp solver call
if (sqp_iter+1 < mem->stat_m)
{
mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
}
// compute external QP residuals (for debugging)
if (opts->ext_qp_res)
{
ocp_qp_res_compute(mem->qp_in, mem->qp_out, work->qp_res, work->qp_res_ws);
if (sqp_iter+1 < mem->stat_m)
ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
// printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, inf_norm_qp_res[0], inf_norm_qp_res[1], inf_norm_qp_res[2], inf_norm_qp_res[3]);
}
// printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter);
// print_ocp_qp_out(mem->qp_out);
// if (sqp_iter==1)
// exit(1);
if ((qp_status!=ACADOS_SUCCESS) && (qp_status!=ACADOS_MAXITER))
{
// print_ocp_qp_in(mem->qp_in);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_QP_FAILURE;
return mem->status;
}
sqp_update_variables(config, dims, nlp_out, opts, mem, work);
// ocp_nlp_dims_print(nlp_out->dims);
// ocp_nlp_out_print(nlp_out);
// exit(1);
// ??? @rien
// for (int_t i = 0; i < N; i++)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme == NULL)
// continue;
// opts->sens_adj = (opts->scheme->type != exact);
// if (nlp_in->freezeSens) {
// // freeze inexact sensitivities after first SQP iteration !!
// opts->scheme->freeze = true;
// }
// }
}
// stop timer
total_time += acados_toc(&timer0);
// ocp_nlp_out_print(nlp_out);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
// printf("%d sqp iterations\n", sqp_iter);
// print_ocp_qp_in(mem->qp_in);
// maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_MAXITER;
return mem->status;
}
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
// ocp_nlp_out *nlp_out = nlp_out_;
// ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_work *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);
int N = dims->N;
int status = ACADOS_SUCCESS;
int ii;
// TODO(all) add flag to enable/disable checks
for (ii = 0; ii <= N; ii++)
{
int module_val;
config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii],
"ns", &module_val);
if (dims->ns[ii] != module_val)
{
printf("ocp_nlp_sqp_precompute: inconsistent dimension ns with constraint module.\n");
exit(1);
}
}
// precompute
for (ii = 0; ii < N; ii++)
{
// set T
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
// dynamics precompute
status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], opts->dynamics[ii],
mem->dynamics[ii], work->dynamics[ii]);
if (status != ACADOS_SUCCESS) return status;
}
return status;
}
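// Sketch of a typical call sequence (illustrative; real callers go through
// the acados C or high-level interfaces):
//   ocp_nlp_sqp_precompute(config, dims, nlp_in, nlp_out, opts, mem, work);
//   int status = ocp_nlp_sqp(config, dims, nlp_in, nlp_out, opts, mem, work);
//   // status is ACADOS_SUCCESS, ACADOS_MAXITER or ACADOS_QP_FAILURE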
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
// ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_work *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);
d_ocp_qp_copy_all(mem->qp_in, work->tmp_qp_in);
d_ocp_qp_set_rhs_zero(work->tmp_qp_in);
double one = 1.0;
if ((!strcmp("ex", field)) && (stage==0))
{
d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
// d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);
// d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
// exit(1);
/* copy tmp_qp_out into sens_nlp_out */
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// int *nz = dims->nz;
for (i = 0; i <= N; i++)
{
blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);
if (i < N)
blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
}
}
else
{
printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
exit(1);
}
return;
}
void ocp_nlp_sqp_get(void *config_, void *mem_, const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_memory *mem = mem_;
if (!strcmp("sqp_iter", field))
{
int *value = return_value_;
*value = mem->sqp_iter;
}
else if (!strcmp("status", field))
{
int *value = return_value_;
*value = mem->status;
}
else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
{
double *value = return_value_;
*value = mem->time_tot;
}
else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
{
double *value = return_value_;
*value = mem->time_qp_sol;
}
else if (!strcmp("time_lin", field))
{
double *value = return_value_;
*value = mem->time_lin;
}
else if (!strcmp("time_reg", field))
{
double *value = return_value_;
*value = mem->time_reg;
}
else if (!strcmp("nlp_res", field))
{
ocp_nlp_res **value = return_value_;
*value = mem->nlp_res;
}
else if (!strcmp("stat", field))
{
double **value = return_value_;
*value = mem->stat;
}
else if (!strcmp("stat_m", field))
{
int *value = return_value_;
*value = mem->stat_m;
}
else if (!strcmp("stat_n", field))
{
int *value = return_value_;
*value = mem->stat_n;
}
else if (!strcmp("nlp_mem", field))
{
void **value = return_value_;
*value = mem->nlp_mem;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
ocp_nlp_config *config = (ocp_nlp_config *) config_;
config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size;
config->opts_assign = &ocp_nlp_sqp_opts_assign;
config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default;
config->opts_update = &ocp_nlp_sqp_opts_update;
config->opts_set = &ocp_nlp_sqp_opts_set;
config->dynamics_opts_set = &ocp_nlp_sqp_dynamics_opts_set;
config->cost_opts_set = &ocp_nlp_sqp_cost_opts_set;
config->constraints_opts_set = &ocp_nlp_sqp_constraints_opts_set;
config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size;
config->memory_assign = &ocp_nlp_sqp_memory_assign;
config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size;
config->evaluate = &ocp_nlp_sqp;
config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens;
config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default;
config->precompute = &ocp_nlp_sqp_precompute;
config->get = &ocp_nlp_sqp_get;
return;
}
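// Illustrative: generic callers drive the solver purely through the
// function-pointer table filled above, e.g.
//   ocp_nlp_sqp_config_initialize_default(config);
//   int size = config->opts_calculate_size(config, dims);
// so the same calling code works unchanged for any NLP solver plugin.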
|
atom_symmetry_class.h | // Copyright (c) 2013-2016 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file atom_symmetry_class.h
*
* \brief Contains declaration and partial implementation of sirius::Atom_symmetry_class class.
*/
#ifndef __ATOM_SYMMETRY_CLASS_H__
#define __ATOM_SYMMETRY_CLASS_H__
//#include "sirius_io.h"
#include "runtime.h"
#include "atom_type.h"
#include "communicator.hpp"
#include "eigenproblem.h"
namespace sirius {
/// Data and methods specific to the symmetry class of the atom.
/** Atoms transforming into each other under symmetry operations belong to the same symmetry class. They have the
* same spherical part of the on-site potential and, as a consequence, the same radial functions.
*/
class Atom_symmetry_class
{
private:
/// Symmetry class id in the range [0, N_class).
int id_;
/// List of atoms of this class.
std::vector<int> atom_id_;
/// Pointer to atom type.
Atom_type const& atom_type_;
/// Spherical part of the effective potential.
/** Used by the LAPW radial solver. */
std::vector<double> spherical_potential_;
/// List of radial functions for the LAPW basis.
/** This array stores all the radial functions (AW and LO) and their derivatives. Radial derivatives of functions
* are multiplied by \f$ x \f$.\n
* 1-st dimension: index of radial point \n
* 2-nd dimension: index of radial function \n
* 3-rd dimension: 0 - function itself, 1 - radial derivative */
mdarray<double, 3> radial_functions_;
/// Surface derivatives of AW radial functions.
mdarray<double, 3> aw_surface_derivatives_;
/// Spherical part of the radial integrals.
mdarray<double, 2> h_spherical_integrals_;
/// Overlap integrals.
mdarray<double, 3> o_radial_integrals_;
/// Overlap integrals for IORA relativistic treatment.
mdarray<double, 2> o1_radial_integrals_;
/// Spin-orbit interaction integrals.
mdarray<double, 3> so_radial_integrals_;
/// Core charge density.
/** All-electron core charge density of the LAPW method. It is recomputed on every SCF iteration due to
the change of effective potential. */
std::vector<double> ae_core_charge_density_;
/// Core eigen-value sum.
double core_eval_sum_{0};
/// Core leakage.
double core_leakage_{0};
/// list of radial descriptor sets used to construct augmented waves
mutable std::vector<radial_solution_descriptor_set> aw_descriptors_;
/// list of radial descriptor sets used to construct local orbitals
mutable std::vector<local_orbital_descriptor> lo_descriptors_;
/// Generate radial functions for augmented waves
inline void generate_aw_radial_functions(relativity_t rel__);
/// Generate local orbital radial functions
inline void generate_lo_radial_functions(relativity_t rel__);
public:
/// Constructor
Atom_symmetry_class(int id_, Atom_type const& atom_type_)
: id_(id_)
, atom_type_(atom_type_)
{
if (!atom_type_.initialized()) {
TERMINATE("atom type is not initialized");
}
aw_surface_derivatives_ = mdarray<double, 3>(atom_type_.max_aw_order(), atom_type_.num_aw_descriptors(), 3);
radial_functions_ = mdarray<double, 3>(atom_type_.num_mt_points(), atom_type_.mt_radial_basis_size(), 2);
h_spherical_integrals_ = mdarray<double, 2>(atom_type_.mt_radial_basis_size(), atom_type_.mt_radial_basis_size());
h_spherical_integrals_.zero();
o_radial_integrals_ = mdarray<double, 3>(atom_type_.indexr().lmax() + 1, atom_type_.indexr().max_num_rf(),
atom_type_.indexr().max_num_rf());
o_radial_integrals_.zero();
so_radial_integrals_ = mdarray<double, 3>(atom_type_.indexr().lmax() + 1, atom_type_.indexr().max_num_rf(),
atom_type_.indexr().max_num_rf());
so_radial_integrals_.zero();
if (atom_type_.parameters().valence_relativity() == relativity_t::iora) {
o1_radial_integrals_ = mdarray<double, 2>(atom_type_.mt_radial_basis_size(), atom_type_.mt_radial_basis_size());
o1_radial_integrals_.zero();
}
/* copy descriptors because enu is different between atom classes */
aw_descriptors_.resize(atom_type_.num_aw_descriptors());
for (int i = 0; i < num_aw_descriptors(); i++) {
aw_descriptors_[i] = atom_type_.aw_descriptor(i);
}
lo_descriptors_.resize(atom_type_.num_lo_descriptors());
for (int i = 0; i < num_lo_descriptors(); i++) {
lo_descriptors_[i] = atom_type_.lo_descriptor(i);
}
ae_core_charge_density_.resize(atom_type_.num_mt_points());
std::memset(&ae_core_charge_density_[0], 0, atom_type_.num_mt_points() * sizeof(double));
}
/// Set the spherical component of the potential
/** Atoms belonging to the same symmetry class have the same spherical potential. */
inline void set_spherical_potential(std::vector<double> const& vs__);
inline void generate_radial_functions(relativity_t rel__);
inline void sync_radial_functions(Communicator const& comm__, int const rank__);
inline void sync_radial_integrals(Communicator const& comm__, int const rank__);
inline void sync_core_charge_density(Communicator const& comm__, int const rank__);
/// Check if local orbitals are linearly independent
inline std::vector<int> check_lo_linear_independence(double etol__);
/// Dump local orbitals to the file for debug purposes
inline void dump_lo();
/// Find core states and generate core density.
inline void generate_core_charge_density(relativity_t core_rel__);
inline void find_enu(relativity_t rel__);
inline void write_enu(runtime::pstdout& pout) const;
/// Generate radial overlap and SO integrals
/** In the case of spin-orbit interaction the following integrals are computed:
* \f[
* \int f_{p}(r) \Big( \frac{1}{(2 M c)^2} \frac{1}{r} \frac{d V}{d r} \Big) f_{p'}(r) r^2 dr
* \f]
*
* Relativistic mass M is defined as
* \f[
* M = 1 - \frac{1}{2 c^2} V
* \f]
*/
inline void generate_radial_integrals(relativity_t rel__);
/// Compute m-th order radial derivative at the MT surface.
inline double aw_surface_dm(int l, int order, int dm) const
{
assert(dm <= 2);
return aw_surface_derivatives_(order, l, dm);
}
inline void set_aw_surface_deriv(int l, int order, int dm, double deriv)
{
assert(dm <= 2);
aw_surface_derivatives_(order, l, dm) = deriv;
}
/// Return symmetry class id.
inline int id() const
{
return id_;
}
/// Add atom id to the current class.
inline void add_atom_id(int atom_id__)
{
atom_id_.push_back(atom_id__);
}
/// Return number of atoms belonging to the current symmetry class.
inline int num_atoms() const
{
return static_cast<int>(atom_id_.size());
}
inline int atom_id(int idx) const
{
return atom_id_[idx];
}
/// Get a value of the radial functions.
inline double radial_function(int ir, int idx) const
{
return radial_functions_(ir, idx, 0);
}
/// Get a reference to the value of the radial function.
inline double& radial_function(int ir, int idx)
{
return radial_functions_(ir, idx, 0);
}
/// Get a value of the radial function derivative.
inline double radial_function_derivative(int ir, int idx) const
{
return radial_functions_(ir, idx, 1);
}
/// Get a reference to the value of the radial function derivative.
inline double& radial_function_derivative(int ir, int idx)
{
return radial_functions_(ir, idx, 1);
}
inline double h_spherical_integral(int i1, int i2) const
{
return h_spherical_integrals_(i1, i2);
}
inline double const& o_radial_integral(int l, int order1, int order2) const
{
return o_radial_integrals_(l, order1, order2);
}
inline void set_o_radial_integral(int l, int order1, int order2, double oint__)
{
o_radial_integrals_(l, order1, order2) = oint__;
}
inline double const& o1_radial_integral(int xi1__, int xi2__) const
{
return o1_radial_integrals_(xi1__, xi2__);
}
inline void set_o1_radial_integral(int idxrf1__, int idxrf2__, double val__)
{
o1_radial_integrals_(idxrf1__, idxrf2__) = val__;
}
inline double so_radial_integral(int l, int order1, int order2) const
{
return so_radial_integrals_(l, order1, order2);
}
inline double ae_core_charge_density(int ir) const
{
assert(ir >= 0 && ir < (int)ae_core_charge_density_.size());
return ae_core_charge_density_[ir];
}
inline Atom_type const& atom_type() const
{
return atom_type_;
}
inline double core_eval_sum() const
{
return core_eval_sum_;
}
inline double core_leakage() const
{
return core_leakage_;
}
inline int num_aw_descriptors() const
{
return static_cast<int>(aw_descriptors_.size());
}
inline radial_solution_descriptor_set& aw_descriptor(int idx__) const
{
return aw_descriptors_[idx__];
}
inline int num_lo_descriptors() const
{
return static_cast<int>(lo_descriptors_.size());
}
inline local_orbital_descriptor& lo_descriptor(int idx__) const
{
return lo_descriptors_[idx__];
}
inline void set_aw_enu(int l, int order, double enu)
{
aw_descriptors_[l][order].enu = enu;
}
inline double get_aw_enu(int l, int order) const
{
return aw_descriptors_[l][order].enu;
}
inline void set_lo_enu(int idxlo, int order, double enu)
{
lo_descriptors_[idxlo].rsd_set[order].enu = enu;
}
inline double get_lo_enu(int idxlo, int order) const
{
return lo_descriptors_[idxlo].rsd_set[order].enu;
}
};
inline void Atom_symmetry_class::generate_aw_radial_functions(relativity_t rel__)
{
int nmtp = atom_type_.num_mt_points();
Radial_solver solver(atom_type_.zn(), spherical_potential_, atom_type_.radial_grid());
#pragma omp parallel default(shared)
{
Spline<double> s(atom_type_.radial_grid());
std::vector<double> p;
std::vector<double> rdudr;
std::array<double, 2> uderiv;
#pragma omp for schedule(dynamic, 1)
for (int l = 0; l < num_aw_descriptors(); l++) {
for (int order = 0; order < (int)aw_descriptor(l).size(); order++) {
auto rsd = aw_descriptor(l)[order];
int idxrf = atom_type_.indexr().index_by_l_order(l, order);
solver.solve(rel__, rsd.dme, rsd.l, rsd.enu, p, rdudr, uderiv);
/* normalize */
for (int ir = 0; ir < nmtp; ir++) {
s(ir) = std::pow(p[ir], 2);
}
double norm = 1.0 / std::sqrt(s.interpolate().integrate(0));
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) = p[ir] * norm;
radial_functions_(ir, idxrf, 1) = rdudr[ir] * norm;
}
aw_surface_derivatives_(order, l, 0) = norm * p.back() / atom_type_.mt_radius();
for (int i: {0, 1}) {
aw_surface_derivatives_(order, l, i + 1) = uderiv[i] * norm;
}
/* orthogonalize to previous radial functions */
for (int order1 = 0; order1 < order; order1++) {
int idxrf1 = atom_type_.indexr().index_by_l_order(l, order1);
for (int ir = 0; ir < nmtp; ir++) {
s(ir) = radial_functions_(ir, idxrf, 0) * radial_functions_(ir, idxrf1, 0);
}
/* <u_{\nu'}|u_{\nu}> */
double ovlp = s.interpolate().integrate(0);
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) -= radial_functions_(ir, idxrf1, 0) * ovlp;
radial_functions_(ir, idxrf, 1) -= radial_functions_(ir, idxrf1, 1) * ovlp;
}
for (int i: {0, 1, 2}) {
aw_surface_derivatives_(order, l, i) -= aw_surface_derivatives_(order1, l, i) * ovlp;
}
}
/* normalize again */
for (int ir = 0; ir < nmtp; ir++) {
s(ir) = std::pow(radial_functions_(ir, idxrf, 0), 2);
}
norm = s.interpolate().integrate(0);
if (std::abs(norm) < 1e-10) {
TERMINATE("aw radial functions are linearly dependent");
}
norm = 1.0 / std::sqrt(norm);
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) *= norm;
radial_functions_(ir, idxrf, 1) *= norm;
}
for (int i: {0, 1, 2}) {
aw_surface_derivatives_(order, l, i) *= norm;
}
}
/* divide by r */
for (int order = 0; order < (int)aw_descriptor(l).size(); order++) {
int idxrf = atom_type_.indexr().index_by_l_order(l, order);
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) *= atom_type_.radial_grid().x_inv(ir);
}
}
}
}
}
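/* Summary of the loop above: for each l and order the scalar radial problem
is solved at the linearization energy enu (or its energy derivative, dme),
the solution is normalized, Gram-Schmidt orthogonalized against the
lower-order functions of the same l (with the surface derivatives transformed
consistently), renormalized, and finally divided by r to obtain
u(r) = p(r) / r. */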
inline void Atom_symmetry_class::generate_lo_radial_functions(relativity_t rel__)
{
int nmtp = atom_type_.num_mt_points();
Radial_solver solver(atom_type_.zn(), spherical_potential_, atom_type_.radial_grid());
#pragma omp parallel default(shared)
{
Spline<double> s(atom_type_.radial_grid());
double a[3][3];
#pragma omp for schedule(dynamic, 1)
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
/* number of radial solutions */
int num_rs = static_cast<int>(lo_descriptor(idxlo).rsd_set.size());
assert(num_rs <= 3);
std::vector<std::vector<double>> p(num_rs);
std::vector<std::vector<double>> rdudr(num_rs);
std::array<double, 2> uderiv;
for (int order = 0; order < num_rs; order++) {
auto rsd = lo_descriptor(idxlo).rsd_set[order];
solver.solve(rel__, rsd.dme, rsd.l, rsd.enu, p[order], rdudr[order], uderiv);
/* find norm of the radial solution */
for (int ir = 0; ir < nmtp; ir++) {
s(ir) = std::pow(p[order][ir], 2);
}
double norm = 1.0 / std::sqrt(s.interpolate().integrate(0));
/* normalize radial solution and divide by r */
for (int ir = 0; ir < nmtp; ir++) {
p[order][ir] *= (norm * atom_type_.radial_grid().x_inv(ir));
/* don't divide rdudr by r */
rdudr[order][ir] *= norm;
}
uderiv[0] *= norm;
uderiv[1] *= norm;
/* matrix of derivatives */
a[order][0] = p[order].back();
a[order][1] = uderiv[0];
a[order][2] = uderiv[1];
}
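/* choose the mixing coefficients b by solving a linear system: the combined
orbital and its lower surface derivatives must vanish at the muffin-tin
boundary, with the highest available derivative normalized to one
(right-hand side is the unit vector e_{num_rs - 1}) */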
double b[] = {0, 0, 0};
b[num_rs - 1] = 1.0;
int info = linalg<CPU>::gesv(num_rs, 1, &a[0][0], 3, b, 3);
if (info) {
std::stringstream s;
s << "a[i][j] = ";
for (int i = 0; i < num_rs; i++) {
for (int j = 0; j < num_rs; j++) {
s << a[i][j] << " ";
}
}
s << std::endl;
s << "atom: " << atom_type_.label() << std::endl
<< "zn: " << atom_type_.zn() << std::endl
<< "l: " << lo_descriptor(idxlo).l << std::endl;
s << "gesv returned " << info;
TERMINATE(s);
}
/* index of local orbital radial function */
int idxrf = atom_type_.indexr().index_by_idxlo(idxlo);
/* take linear combination of radial solutions */
for (int order = 0; order < num_rs; order++) {
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) += b[order] * p[order][ir];
radial_functions_(ir, idxrf, 1) += b[order] * rdudr[order][ir];
}
}
/* find norm of constructed local orbital */
for (int ir = 0; ir < nmtp; ir++) {
s(ir) = std::pow(radial_functions_(ir, idxrf, 0), 2);
}
double norm = 1.0 / std::sqrt(s.interpolate().integrate(2));
/* normalize */
for (int ir = 0; ir < nmtp; ir++) {
radial_functions_(ir, idxrf, 0) *= norm;
radial_functions_(ir, idxrf, 1) *= norm;
}
if (std::abs(radial_functions_(nmtp - 1, idxrf, 0)) > 1e-10) {
std::stringstream s;
s << "local orbital " << idxlo << " is not zero at MT boundary" << std::endl
<< " atom symmetry class id : " << id() << " (" << atom_type().symbol() << ")" << std::endl
<< " value : " << radial_functions_(nmtp - 1, idxrf, 0) << std::endl
<< " number of MT points: " << nmtp << std::endl
<< " MT radius: " << atom_type_.radial_grid().last() << std::endl
<< " b_coeffs: ";
for (int j = 0; j < num_rs; j++) {
s << b[j] << " ";
}
WARNING(s);
}
}
}
if (atom_type_.parameters().control().verification_ > 0 && num_lo_descriptors() > 0) {
check_lo_linear_independence(0.0001);
}
}
inline std::vector<int> Atom_symmetry_class::check_lo_linear_independence(double tol__)
{
int nmtp = atom_type_.num_mt_points();
Spline<double> s(atom_type_.radial_grid());
dmatrix<double> loprod(num_lo_descriptors(), num_lo_descriptors());
loprod.zero();
for (int idxlo1 = 0; idxlo1 < num_lo_descriptors(); idxlo1++) {
int idxrf1 = atom_type_.indexr().index_by_idxlo(idxlo1);
for (int idxlo2 = 0; idxlo2 < num_lo_descriptors(); idxlo2++) {
int idxrf2 = atom_type_.indexr().index_by_idxlo(idxlo2);
if (lo_descriptor(idxlo1).l == lo_descriptor(idxlo2).l) {
for (int ir = 0; ir < nmtp; ir++) {
s(ir) = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0);
}
loprod(idxlo1, idxlo2) = s.interpolate().integrate(2);
}
}
}
mdarray<double, 2> ovlp(num_lo_descriptors(), num_lo_descriptors());
loprod >> ovlp;
Eigensolver_lapack<double> stdevp;
std::vector<double> loprod_eval(num_lo_descriptors());
dmatrix<double> loprod_evec(num_lo_descriptors(), num_lo_descriptors());
stdevp.solve(num_lo_descriptors(), loprod, &loprod_eval[0], loprod_evec);
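/* the smallest eigenvalue of the overlap matrix measures how close the set
   of local orbitals is to linear dependence */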
if (std::abs(loprod_eval[0]) < tol__) {
printf("\n");
printf("local orbitals for atom symmetry class %i are almost linearly dependent\n", id_);
printf("local orbitals overlap matrix:\n");
for (int i = 0; i < num_lo_descriptors(); i++) {
for (int j = 0; j < num_lo_descriptors(); j++) {
printf("%12.6f", ovlp(i, j));
}
printf("\n");
}
printf("overlap matrix eigen-values:\n");
for (int i = 0; i < num_lo_descriptors(); i++) {
printf("%12.6f", loprod_eval[i]);
}
printf("\n");
printf("smallest eigenvalue: %20.16f\n", loprod_eval[0]);
}
std::vector<int> inc(num_lo_descriptors(), 0);
/* try all local orbitals */
for (int i = 0; i < num_lo_descriptors(); i++) {
inc[i] = 1;
std::vector<int> ilo;
for (int j = 0; j < num_lo_descriptors(); j++) {
if (inc[j] == 1) {
ilo.push_back(j);
}
}
std::vector<double> eval(ilo.size());
dmatrix<double> evec(static_cast<int>(ilo.size()), static_cast<int>(ilo.size()));
dmatrix<double> tmp(static_cast<int>(ilo.size()), static_cast<int>(ilo.size()));
for (size_t j1 = 0; j1 < ilo.size(); j1++) {
for (size_t j2 = 0; j2 < ilo.size(); j2++) {
tmp(j1, j2) = ovlp(ilo[j1], ilo[j2]);
}
}
stdevp.solve(static_cast<int>(ilo.size()), tmp, &eval[0], evec);
if (eval[0] < tol__) {
printf("local orbital %i can be removed\n", i);
inc[i] = 0;
}
}
return inc;
}
inline void Atom_symmetry_class::dump_lo()
{
std::stringstream s;
s << "local_orbitals_" << id_ << ".dat";
FILE* fout = fopen(s.str().c_str(), "w");
for (int ir = 0; ir < atom_type_.num_mt_points(); ir++) {
fprintf(fout, "%f ", atom_type_.radial_grid(ir));
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
int idxrf = atom_type_.indexr().index_by_idxlo(idxlo);
fprintf(fout, "%f ", radial_functions_(ir, idxrf, 0));
}
fprintf(fout, "\n");
}
fclose(fout);
s.str("");
s << "local_orbitals_deriv_" << id_ << ".dat";
fout = fopen(s.str().c_str(), "w");
for (int ir = 0; ir < atom_type_.num_mt_points(); ir++) {
fprintf(fout, "%f ", atom_type_.radial_grid(ir));
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
int idxrf = atom_type_.indexr().index_by_idxlo(idxlo);
fprintf(fout, "%f ", radial_functions_(ir, idxrf, 1));
}
fprintf(fout, "\n");
}
fclose(fout);
}
inline void Atom_symmetry_class::set_spherical_potential(std::vector<double> const& vs__)
{
if (atom_type_.num_mt_points() != (int)vs__.size()) {
TERMINATE("wrong size of effective potential array");
}
spherical_potential_ = vs__;
//HDF5_tree fout("mt_potential.h5", true);
//fout.write("potential", spherical_potential_);
///* write spherical potential */
//std::stringstream sstr;
//sstr << "mt_spheric_potential_" << id_ << ".dat";
//FILE* fout = fopen(sstr.str().c_str(), "w");
//for (int ir = 0; ir < atom_type_.num_mt_points(); ir++) {
// double r = atom_type_.radial_grid(ir);
// fprintf(fout, "%20.10f %20.10f \n", r, spherical_potential_[ir] + atom_type_.zn() / r);
//}
//fclose(fout);
}
inline void Atom_symmetry_class::find_enu(relativity_t rel__)
{
PROFILE("sirius::Atom_symmetry_class::find_enu");
std::vector<radial_solution_descriptor*> rs_with_auto_enu;
/* find which aw functions need auto enu */
for (int l = 0; l < num_aw_descriptors(); l++) {
for (size_t order = 0; order < aw_descriptor(l).size(); order++) {
auto& rsd = aw_descriptor(l)[order];
if (rsd.auto_enu) {
rs_with_auto_enu.push_back(&rsd);
}
}
}
/* find which lo functions need auto enu */
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
/* number of radial solutions */
size_t num_rs = lo_descriptor(idxlo).rsd_set.size();
for (size_t order = 0; order < num_rs; order++) {
auto& rsd = lo_descriptor(idxlo).rsd_set[order];
if (rsd.auto_enu) {
rs_with_auto_enu.push_back(&rsd);
}
}
}
#pragma omp parallel for
for (size_t i = 0; i < rs_with_auto_enu.size(); i++) {
auto rsd = rs_with_auto_enu[i];
rsd->enu = Enu_finder(rel__, atom_type_.zn(), rsd->n, rsd->l, atom_type_.radial_grid(), spherical_potential_, rsd->enu).enu();
}
}
inline void Atom_symmetry_class::generate_radial_functions(relativity_t rel__)
{
PROFILE("sirius::Atom_symmetry_class::generate_radial_functions");
radial_functions_.zero();
find_enu(rel__);
generate_aw_radial_functions(rel__);
generate_lo_radial_functions(rel__);
#ifdef __PRINT_OBJECT_CHECKSUM
DUMP("checksum(spherical_potential): %18.10f", mdarray<double, 1>(spherical_potential_.data(), atom_type_.num_mt_points()).checksum());
DUMP("checksum(radial_functions): %18.10f", radial_functions_.checksum());
#endif
//** if (true)
//** {
//** std::stringstream s;
//** s << "radial_functions_" << id_ << ".dat";
//** FILE* fout = fopen(s.str().c_str(), "w");
//** for (int ir = 0; ir <atom_type_.num_mt_points(); ir++)
//** {
//** fprintf(fout, "%f ", atom_type_.radial_grid(ir));
//** for (int idxrf = 0; idxrf < atom_type_.indexr().size(); idxrf++)
//** {
//** fprintf(fout, "%f ", radial_functions_(ir, idxrf, 0));
//** }
//** fprintf(fout, "\n");
//** }
//** fclose(fout);
//** }
//** STOP();
}
inline void Atom_symmetry_class::sync_radial_functions(Communicator const& comm__, int const rank__)
{
/* don't broadcast Hamiltonian radial functions, because they are used locally */
int size = (int)(radial_functions_.size(0) * radial_functions_.size(1));
comm__.bcast(radial_functions_.at<CPU>(), size, rank__);
comm__.bcast(aw_surface_derivatives_.at<CPU>(), (int)aw_surface_derivatives_.size(), rank__);
// TODO: sync enu to pass to Exciting / Elk
}
inline void Atom_symmetry_class::sync_radial_integrals(Communicator const& comm__, int const rank__)
{
comm__.bcast(h_spherical_integrals_.at<CPU>(), (int)h_spherical_integrals_.size(), rank__);
comm__.bcast(o_radial_integrals_.at<CPU>(), (int)o_radial_integrals_.size(), rank__);
comm__.bcast(so_radial_integrals_.at<CPU>(), (int)so_radial_integrals_.size(), rank__);
if (atom_type_.parameters().valence_relativity() == relativity_t::iora) {
comm__.bcast(o1_radial_integrals_.at<CPU>(), (int)o1_radial_integrals_.size(), rank__);
}
}
inline void Atom_symmetry_class::sync_core_charge_density(Communicator const& comm__, int const rank__)
{
assert(ae_core_charge_density_.size() != 0);
comm__.bcast(&ae_core_charge_density_[0], atom_type_.radial_grid().num_points(), rank__);
comm__.bcast(&core_leakage_, 1, rank__);
comm__.bcast(&core_eval_sum_, 1, rank__);
}
inline void Atom_symmetry_class::generate_radial_integrals(relativity_t rel__)
{
PROFILE("sirius::Atom_symmetry_class::generate_radial_integrals");
int nmtp = atom_type_.num_mt_points();
double sq_alpha_half = 0.5 * std::pow(speed_of_light, -2);
if (rel__ == relativity_t::none) {
sq_alpha_half = 0;
}
h_spherical_integrals_.zero();
#pragma omp parallel default(shared)
{
Spline<double> s(atom_type_.radial_grid());
#pragma omp for
for (int i1 = 0; i1 < atom_type_.mt_radial_basis_size(); i1++) {
for (int i2 = 0; i2 < atom_type_.mt_radial_basis_size(); i2++) {
/* for spherical part of potential integrals are diagonal in l */
if (atom_type_.indexr(i1).l == atom_type_.indexr(i2).l) {
int ll = atom_type_.indexr(i1).l * (atom_type_.indexr(i1).l + 1);
for (int ir = 0; ir < nmtp; ir++) {
double Minv = 1.0 / (1 - spherical_potential_[ir] * sq_alpha_half);
/* u_1(r) * u_2(r) */
double t0 = radial_functions_(ir, i1, 0) * radial_functions_(ir, i2, 0);
/* r*u'_1(r) * r*u'_2(r) */
double t1 = radial_functions_(ir, i1, 1) * radial_functions_(ir, i2, 1);
s(ir) = 0.5 * t1 * Minv + t0 * (0.5 * ll * Minv + spherical_potential_[ir] * std::pow(atom_type_.radial_grid(ir), 2));
}
h_spherical_integrals_(i1, i2) = s.interpolate().integrate(0) / y00;
}
}
}
}
o_radial_integrals_.zero();
#pragma omp parallel default(shared)
{
Spline<double> s(atom_type_.radial_grid());
#pragma omp for
for (int l = 0; l <= atom_type_.indexr().lmax(); l++) {
int nrf = atom_type_.indexr().num_rf(l);
for (int order1 = 0; order1 < nrf; order1++) {
int idxrf1 = atom_type_.indexr().index_by_l_order(l, order1);
for (int order2 = 0; order2 < nrf; order2++) {
int idxrf2 = atom_type_.indexr().index_by_l_order(l, order2);
if (order1 == order2) {
o_radial_integrals_(l, order1, order2) = 1.0;
} else {
for (int ir = 0; ir < nmtp; ir++) {
s(ir) = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0);
}
o_radial_integrals_(l, order1, order2) = s.interpolate().integrate(2);
}
}
}
}
}
if (atom_type_.parameters().valence_relativity() == relativity_t::iora) {
o1_radial_integrals_.zero();
#pragma omp parallel default(shared)
{
Spline<double> s(atom_type_.radial_grid());
#pragma omp for
for (int i1 = 0; i1 < atom_type_.mt_radial_basis_size(); i1++) {
for (int i2 = 0; i2 < atom_type_.mt_radial_basis_size(); i2++) {
/* for spherical part of potential integrals are diagonal in l */
if (atom_type_.indexr(i1).l == atom_type_.indexr(i2).l) {
int ll = atom_type_.indexr(i1).l * (atom_type_.indexr(i1).l + 1);
for (int ir = 0; ir < nmtp; ir++) {
double Minv = std::pow(1 - spherical_potential_[ir] * sq_alpha_half, -2);
/* u_1(r) * u_2(r) */
double t0 = radial_functions_(ir, i1, 0) * radial_functions_(ir, i2, 0);
/* r*u'_1(r) * r*u'_2(r) */
double t1 = radial_functions_(ir, i1, 1) * radial_functions_(ir, i2, 1);
s(ir) = sq_alpha_half * 0.5 * Minv * (t1 + t0 * 0.5 * ll);
}
o1_radial_integrals_(i1, i2) = s.interpolate().integrate(0);
}
}
}
}
}
if (false) // TODO: if it's slow, compute only when spin-orbit is turned on
{
double soc = std::pow(2 * speed_of_light, -2);
Spline<double> s(atom_type_.radial_grid());
Spline<double> s1(atom_type_.radial_grid());
Spline<double> ve(atom_type_.radial_grid());
for (int i = 0; i < nmtp; i++) {
ve(i) = spherical_potential_[i] + atom_type_.zn() / atom_type_.radial_grid(i);
}
ve.interpolate();
so_radial_integrals_.zero();
for (int l = 0; l <= atom_type_.indexr().lmax(); l++)
{
int nrf = atom_type_.indexr().num_rf(l);
for (int order1 = 0; order1 < nrf; order1++)
{
int idxrf1 = atom_type_.indexr().index_by_l_order(l, order1);
for (int order2 = 0; order2 < nrf; order2++)
{
int idxrf2 = atom_type_.indexr().index_by_l_order(l, order2);
for (int ir = 0; ir < nmtp; ir++)
{
double M = 1.0 - 2 * soc * spherical_potential_[ir];
/* first part <f| dVe / dr |f'> */
s(ir) = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0) *
soc * ve.deriv(1, ir) / pow(M, 2);
/* second part <f| d(z/r) / dr |f'> */
s1(ir) = radial_functions_(ir, idxrf1, 0) * radial_functions_(ir, idxrf2, 0) *
soc * atom_type_.zn() / pow(M, 2);
}
s.interpolate();
s1.interpolate();
so_radial_integrals_(l, order1, order2) = s.integrate(1) + s1.integrate(-1);
}
}
}
}
}
inline void Atom_symmetry_class::write_enu(runtime::pstdout& pout) const
{
pout.printf("Atom : %s, class id : %i\n", atom_type_.symbol().c_str(), id_);
pout.printf("augmented waves\n");
for (int l = 0; l < num_aw_descriptors(); l++) {
for (size_t order = 0; order < aw_descriptor(l).size(); order++) {
auto& rsd = aw_descriptor(l)[order];
if (rsd.auto_enu) {
pout.printf("n = %2i l = %2i order = %i enu = %12.6f\n", rsd.n, rsd.l, order, rsd.enu);
}
}
}
pout.printf("local orbitals\n");
for (int idxlo = 0; idxlo < num_lo_descriptors(); idxlo++) {
for (size_t order = 0; order < lo_descriptor(idxlo).rsd_set.size(); order++) {
auto& rsd = lo_descriptor(idxlo).rsd_set[order];
if (rsd.auto_enu) {
pout.printf("n = %2i l = %2i order = %i enu = %12.6f\n", rsd.n, rsd.l, order, rsd.enu);
}
}
}
pout.printf("\n");
}
inline void Atom_symmetry_class::generate_core_charge_density(relativity_t core_rel__)
{
PROFILE("sirius::Atom_symmetry_class::generate_core_charge_density");
/* nothing to do */
if (atom_type_.num_core_electrons() == 0.0) {
return;
}
int nmtp = atom_type_.num_mt_points();
std::vector<double> free_atom_grid(nmtp);
for (int i = 0; i < nmtp; i++) {
free_atom_grid[i] = atom_type_.radial_grid(i);
}
/* extend radial grid */
double x = atom_type_.radial_grid(nmtp - 1);
double dx = atom_type_.radial_grid().dx(nmtp - 2);
while (x < 30.0 + atom_type_.zn() / 4.0) {
x += dx;
free_atom_grid.push_back(x);
dx *= 1.025;
}
Radial_grid_ext<double> rgrid(static_cast<int>(free_atom_grid.size()), free_atom_grid.data());
/* interpolate spherical potential inside muffin-tin */
Spline<double> svmt(atom_type_.radial_grid());
/* remove nucleus contribution from Vmt */
for (int ir = 0; ir < nmtp; ir++) {
svmt(ir) = spherical_potential_[ir] + atom_type_.zn() * atom_type_.radial_grid().x_inv(ir);
}
svmt.interpolate();
/* fit tail to alpha/r + beta */
double alpha = -(std::pow(atom_type_.mt_radius(), 2) * svmt.deriv(1, nmtp - 1) + atom_type_.zn());
double beta = svmt(nmtp - 1) - (atom_type_.zn() + alpha) / atom_type_.mt_radius();
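/* alpha and beta follow from matching value and slope of the full potential
   at the MT boundary: -alpha/R^2 = V'(R) and alpha/R + beta = V(R), with
   V(r) = svmt(r) - zn/r */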
/* cook an effective potential from muffin-tin part and a tail */
std::vector<double> veff(rgrid.num_points());
for (int ir = 0; ir < nmtp; ir++) {
veff[ir] = spherical_potential_[ir];
}
/* simple tail alpha/r + beta */
for (int ir = nmtp; ir < rgrid.num_points(); ir++) {
veff[ir] = alpha * rgrid.x_inv(ir) + beta;
}
//== /* write spherical potential */
//== std::stringstream sstr;
//== sstr << "spheric_potential_" << id_ << ".dat";
//== FILE* fout = fopen(sstr.str().c_str(), "w");
//== for (int ir = 0; ir < rgrid.num_points(); ir++)
//== {
//== fprintf(fout, "%18.10f %18.10f\n", rgrid[ir], veff[ir]);
//== }
//== fclose(fout);
//== STOP();
/* charge density */
Spline<double> rho(rgrid);
/* atomic level energies */
std::vector<double> level_energy(atom_type_.num_atomic_levels());
for (int ist = 0; ist < atom_type_.num_atomic_levels(); ist++) {
level_energy[ist] = -1.0 * atom_type_.zn() / 2 / std::pow(double(atom_type_.atomic_level(ist).n), 2);
}
#pragma omp parallel default(shared)
{
std::vector<double> rho_t(rho.num_points());
std::memset(&rho_t[0], 0, rho.num_points() * sizeof(double));
#pragma omp for
for (int ist = 0; ist < atom_type_.num_atomic_levels(); ist++) {
if (atom_type_.atomic_level(ist).core) {
Bound_state bs(core_rel__, atom_type_.zn(), atom_type_.atomic_level(ist).n, atom_type_.atomic_level(ist).l,
atom_type_.atomic_level(ist).k, rgrid, veff, level_energy[ist]);
auto& rho = bs.rho();
for (int i = 0; i < rgrid.num_points(); i++) {
rho_t[i] += atom_type_.atomic_level(ist).occupancy * rho(i) / fourpi;
}
level_energy[ist] = bs.enu();
}
}
#pragma omp critical
for (int i = 0; i < rho.num_points(); i++) {
rho(i) += rho_t[i];
}
}
for (int ir = 0; ir < atom_type_.num_mt_points(); ir++) {
ae_core_charge_density_[ir] = rho(ir);
}
/* interpolate muffin-tin part of core density */
Spline<double> rho_mt(atom_type_.radial_grid(), ae_core_charge_density_);
/* compute core leakage */
core_leakage_ = fourpi * (rho.interpolate().integrate(2) - rho_mt.integrate(2));
/* compute eigen-value sum of core states */
core_eval_sum_ = 0.0;
for (int ist = 0; ist < atom_type_.num_atomic_levels(); ist++) {
if (atom_type_.atomic_level(ist).core) {
core_eval_sum_ += level_energy[ist] * atom_type_.atomic_level(ist).occupancy;
}
}
}
} // namespace
#endif // __ATOM_SYMMETRY_CLASS_H__
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. The fuzz member of
% image defines how much tolerance is acceptable to consider two colors as
% the same. For example, with fuzz set to 10, the color red at intensities
% of 100 and 102 is interpreted as the same color for the purposes of the
% floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
%        const DrawInfo *draw_info,const PixelInfo *target,
% const ssize_t x_offset,const ssize_t y_offset,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
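/*
  A minimal usage sketch (illustrative only, not part of the original source;
  the image and exception objects plus error handling are assumed to be set
  up by the caller): fill the region connected to pixel (0,0) with yellow.

    DrawInfo *draw_info = AcquireDrawInfo();
    PixelInfo target;
    (void) QueryColorCompliance("yellow",AllCompliance,&draw_info->fill,
      exception);
    (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,0,0,&target,
      exception);
    (void) FloodfillPaintImage(image,draw_info,&target,0,0,MagickFalse,
      exception);
    draw_info=DestroyDrawInfo(draw_info);
*/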
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
const ssize_t y_offset,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define MaxStacksize 524288UL
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
CacheView
*floodplane_view,
*image_view;
Image
*floodplane_image;
MagickBooleanType
skip,
status;
MemoryInfo
*segment_info;
PixelInfo
fill_color,
pixel;
SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if ((image->alpha_trait == UndefinedPixelTrait) &&
(draw_info->fill.alpha_trait != UndefinedPixelTrait))
(void) SetImageAlpha(image,OpaqueAlpha,exception);
/*
Set floodfill state.
*/
floodplane_image=CloneImage(image,0,0,MagickTrue,exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
floodplane_image->alpha_trait=UndefinedPixelTrait;
floodplane_image->colorspace=GRAYColorspace;
(void) QueryColorCompliance("#000",AllCompliance,
&floodplane_image->background_color,exception);
(void) SetImageBackgroundColor(floodplane_image,exception);
segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
if (segment_info == (MemoryInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
/*
Push initial segment on stack.
*/
status=MagickTrue;
start=0;
s=segment_stack;
PushSegmentStack(y_offset,x_offset,x_offset,1);
PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
while (s > segment_stack)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
p+=x1*GetPixelChannels(image);
q+=x1*GetPixelChannels(floodplane_image);
for (x=x1; x >= 0; x--)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p-=GetPixelChannels(image);
q-=GetPixelChannels(floodplane_image);
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
if (x < (ssize_t) image->columns)
{
p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
x,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
if (status == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
if (x <= x2)
{
p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x <= x2; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
}
start=x;
} while (x <= x2);
}
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/*
Tile fill color onto floodplane.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,p) != 0)
{
GetFillColor(draw_info,x,y,&fill_color,exception);
SetPixelViaPixelInfo(image,&fill_color,q);
}
p+=GetPixelChannels(floodplane_image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
floodplane_view=DestroyCacheView(floodplane_view);
image_view=DestroyCacheView(image_view);
segment_info=RelinquishVirtualMemory(segment_info);
floodplane_image=DestroyImage(floodplane_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transition.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const StopInfo *stops,
%        const size_t number_stops,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
%    o method: the gradient spread method: pad, reflect, or repeat.
%
%    o stops: an array of gradient stops (offset and color pairs).
%
%    o number_stops: the number of stops in the array.
%
% o exception: return any errors or warnings in this structure.
%
*/
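/*
  A minimal usage sketch (illustrative only; the image and exception objects
  are assumed): render a two-stop black-to-white linear gradient.

    StopInfo stops[2];
    stops[0].offset=0.0;
    stops[1].offset=1.0;
    (void) QueryColorCompliance("black",AllCompliance,&stops[0].color,
      exception);
    (void) QueryColorCompliance("white",AllCompliance,&stops[1].color,
      exception);
    (void) GradientImage(image,LinearGradient,PadSpread,stops,2,exception);
*/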
MagickExport MagickBooleanType GradientImage(Image *image,
const GradientType type,const SpreadMethod method,const StopInfo *stops,
const size_t number_stops,ExceptionInfo *exception)
{
const char
*artifact;
DrawInfo
*draw_info;
GradientInfo
*gradient;
MagickBooleanType
status;
/*
Set gradient start-stop end points.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(stops != (const StopInfo *) NULL);
assert(number_stops > 0);
draw_info=AcquireDrawInfo();
gradient=(&draw_info->gradient);
gradient->type=type;
gradient->bounding_box.width=image->columns;
gradient->bounding_box.height=image->rows;
artifact=GetImageArtifact(image,"gradient:bounding-box");
if (artifact != (const char *) NULL)
(void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
artifact=GetImageArtifact(image,"gradient:direction");
if (artifact != (const char *) NULL)
{
GravityType
direction;
direction=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,artifact);
switch (direction)
{
case NorthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case WestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case EastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case SouthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
case SouthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
case SouthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
default:
break;
}
}
artifact=GetImageArtifact(image,"gradient:angle");
if (artifact != (const char *) NULL)
gradient->angle=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"gradient:vector");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
&gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
&gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
gradient->gradient_vector.x2=0.0;
gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
artifact=GetImageArtifact(image,"gradient:center");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
&gradient->center.y);
artifact=GetImageArtifact(image,"gradient:angle");
if ((type == LinearGradient) && (artifact != (const char *) NULL))
{
double
sine,
cosine,
distance;
/*
Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
*/
sine=sin((double) DegreesToRadians(gradient->angle-90.0));
cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
distance=fabs((double) (image->columns-1.0)*cosine)+
fabs((double) (image->rows-1.0)*sine);
gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine);
gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine);
gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine);
gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine);
}
gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/
2.0;
gradient->radii.y=gradient->radii.x;
artifact=GetImageArtifact(image,"gradient:extent");
if (artifact != (const char *) NULL)
{
if (LocaleCompare(artifact,"Circle") == 0)
{
gradient->radii.x=(double) MagickMax((image->columns-1.0),
(image->rows-1.0))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Diagonal") == 0)
{
gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)*
(image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Ellipse") == 0)
{
gradient->radii.x=(double) (image->columns-1.0)/2.0;
gradient->radii.y=(double) (image->rows-1.0)/2.0;
}
if (LocaleCompare(artifact,"Maximum") == 0)
{
gradient->radii.x=(double) MagickMax((image->columns-1.0),
(image->rows-1.0))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Minimum") == 0)
{
gradient->radii.x=(double) (MagickMin((image->columns-1.0),
(image->rows-1.0)))/2.0;
gradient->radii.y=gradient->radii.x;
}
}
artifact=GetImageArtifact(image,"gradient:radii");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
&gradient->radii.y);
gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
gradient->spread=method;
/*
Define the gradient to fill between the stops.
*/
gradient->number_stops=number_stops;
gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
sizeof(*gradient->stops));
if (gradient->stops == (StopInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memcpy(gradient->stops,stops,(size_t) number_stops*sizeof(*stops));
/*
Draw a gradient on the image.
*/
status=DrawGradientImage(image,draw_info,exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
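/*
  A minimal usage sketch (illustrative only; the image and exception objects
  are assumed): simulate an oil painting with a 3-pixel radius.

    Image *paint_image;
    paint_image=OilPaintImage(image,3.0,1.0,exception);
*/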
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
ssize_t
i;
assert(histogram != (size_t **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (histogram[i] != (size_t *) NULL)
histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
histogram=(size_t **) RelinquishMagickMemory(histogram);
return(histogram);
}
static size_t **AcquireHistogramThreadSet(const size_t count)
{
ssize_t
i;
size_t
**histogram,
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram));
if (histogram == (size_t **) NULL)
return((size_t **) NULL);
(void) memset(histogram,0,number_threads*sizeof(*histogram));
for (i=0; i < (ssize_t) number_threads; i++)
{
histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram));
if (histogram[i] == (size_t *) NULL)
return(DestroyHistogramThreadSet(histogram));
}
return(histogram);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"
CacheView
*image_view,
*paint_view;
Image
*linear_image,
*paint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
**histograms,
width;
ssize_t
center,
y;
/*
Initialize painted image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth2D(radius,sigma);
linear_image=CloneImage(image,0,0,MagickTrue,exception);
paint_image=CloneImage(image,0,0,MagickTrue,exception);
if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
{
if (linear_image != (Image *) NULL)
linear_image=DestroyImage(linear_image);
if (paint_image != (Image *) NULL)
paint_image=DestroyImage(paint_image);
return((Image *) NULL);
}
if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
{
linear_image=DestroyImage(linear_image);
paint_image=DestroyImage(paint_image);
return((Image *) NULL);
}
histograms=AcquireHistogramThreadSet(NumberPaintBins);
if (histograms == (size_t **) NULL)
{
linear_image=DestroyImage(linear_image);
paint_image=DestroyImage(paint_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Oil paint image.
*/
status=MagickTrue;
progress=0;
center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
(width/2L)+GetPixelChannels(linear_image)*(width/2L);
image_view=AcquireVirtualCacheView(linear_image,exception);
paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
for (y=0; y < (ssize_t) linear_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
size_t
*histogram;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(width/2L),linear_image->columns+width,width,exception);
q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
histogram=histograms[GetOpenMPThreadId()];
for (x=0; x < (ssize_t) linear_image->columns; x++)
{
ssize_t
i,
u;
size_t
count;
ssize_t
j,
k,
n,
v;
/*
Assign most frequent color.
*/
k=0;
j=0;
count=0;
(void) memset(histogram,0,NumberPaintBins*sizeof(*histogram));
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
linear_image,p+GetPixelChannels(linear_image)*(u+k))));
histogram[n]++;
if (histogram[n] > count)
{
j=k+u;
count=histogram[n];
}
}
k+=(ssize_t) (linear_image->columns+width);
}
for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(linear_image,i);
PixelTrait traits = GetPixelChannelTraits(linear_image,channel);
PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel);
if ((traits == UndefinedPixelTrait) ||
(paint_traits == UndefinedPixelTrait))
continue;
if ((paint_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(paint_image,channel,p[center+i],q);
continue;
}
SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+
i],q);
}
p+=GetPixelChannels(linear_image);
q+=GetPixelChannels(paint_image);
}
if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
status=MagickFalse;
if (linear_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(linear_image,OilPaintImageTag,progress,
linear_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
paint_view=DestroyCacheView(paint_view);
image_view=DestroyCacheView(image_view);
histograms=DestroyHistogramThreadSet(histograms);
linear_image=DestroyImage(linear_image);
if (status == MagickFalse)
paint_image=DestroyImage(paint_image);
return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches the target color to the
% color defined by the fill argument.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% with fuzz set to 10, the color red at intensities of 100 and 102 is
% interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
% const PixelInfo *fill,const MagickBooleanType invert,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
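/*
  A minimal usage sketch (illustrative only; the image and exception objects
  are assumed): recolor every red pixel blue.

    PixelInfo fill, target;
    (void) QueryColorCompliance("red",AllCompliance,&target,exception);
    (void) QueryColorCompliance("blue",AllCompliance,&fill,exception);
    (void) OpaquePaintImage(image,&target,&fill,MagickFalse,exception);
*/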
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define OpaquePaintImageTag "Opaque/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
conform_fill,
conform_target,
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
assert(fill != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
ConformPixelInfo(image,fill,&conform_fill,exception);
ConformPixelInfo(image,target,&conform_target,exception);
/*
Make image color opaque.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
{
PixelTrait
traits;
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelRed(image,(Quantum) conform_fill.red,q);
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelGreen(image,(Quantum) conform_fill.green,q);
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlue(image,(Quantum) conform_fill.blue,q);
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlack(image,(Quantum) conform_fill.black,q);
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelAlpha(image,(Quantum) conform_fill.alpha,q);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% with fuzz set to 10, the color red at intensities of 100 and 102 is
% interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const PixelInfo *target,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
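/*
  A minimal usage sketch (illustrative only; the image and exception objects
  are assumed): make every white pixel fully transparent.

    PixelInfo target;
    (void) QueryColorCompliance("white",AllCompliance,&target,exception);
    (void) TransparentPaintImage(image,&target,(Quantum) TransparentAlpha,
      MagickFalse,exception);
*/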
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is a single fuzz value for all the channels, TransparentPaintImage()
% is not suitable for operations like chroma, where the tolerance for
% similarity of two color components (RGB) can differ. Thus we define this
% method to take two target pixels (one low and one high), and all the pixels
% of an image lying between these two pixels are made transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
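/*
  A minimal usage sketch (illustrative only; the color bounds below are
  made-up values): knock out a band of greens, keying on each channel
  independently.

    PixelInfo low, high;
    (void) QueryColorCompliance("#004000",AllCompliance,&low,exception);
    (void) QueryColorCompliance("#40ff40",AllCompliance,&high,exception);
    (void) TransparentPaintImageChroma(image,&low,&high,
      (Quantum) TransparentAlpha,MagickFalse,exception);
*/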
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(high != (PixelInfo *) NULL);
assert(low != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
match;
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
(pixel.green >= low->green) && (pixel.green <= high->green) &&
(pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
MagickFalse;
if (match != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
displacement_field_greedy.c | #include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <float.h>
#include <string.h>
#include <stdbool.h>
#include "disptools.h"
#include "displacement_field_greedy.h"
#include "jacobian.h"
#include "error.h"
/*!
* \brief Update the displacement field according to the error
* on the Jacobian map.
*
* Each component of the displacement field is updated independently.
* For each component, check the error on the two neighbours. If the
* error is positive (negative) on the preceding (following) voxel
* along the direction, then the value of the field must be decreased,
* vice versa for errors of the opposite sign.
*/
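/*
 * Illustrative 1-D form of the update performed below (assuming unit
 * spacing):
 *
 *     u_new(x) = u_old(x) + (eta / 2) * (e(x+1) - e(x-1))
 *
 * i.e. each field component moves along the central-difference gradient of
 * the voxel error.
 */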
static inline void greedy_step(
const Image old_field, /*!< Current displacement field */
const Image new_field, /*!< New displacement field */
const Image voxel_error, /*!< Error on the Jacobian */
const FLOATING eta /*!< Step length parameter */
)
{
// Do not iterate over the voxels on the boundary
const size_t x_max = old_field.nx - 1;
const size_t y_max = old_field.ny - 1;
const size_t z_max = old_field.nz - 1;
// Precompute fractional step for each direction
const FLOATING ddx = eta * old_field.dx * .5f;
const FLOATING ddy = eta * old_field.dy * .5f;
const FLOATING ddz = eta * old_field.dz * .5f;
// Update the displacement vectors in each voxel according to the error
#ifdef __GNUC__
#pragma omp parallel for collapse(3) schedule(static)
for (size_t z = 1; z < z_max; ++z) {
for (size_t y = 1; y < y_max; ++y) {
for (size_t x = 1; x < x_max; ++x) {
#else // MSVC 15 does not support OpenMP > 2.0
int z;
#pragma omp parallel for
for (z = 1; z < z_max; ++z) {
for (size_t y = 1; y < y_max; ++y) {
for (size_t x = 1; x < x_max; ++x) {
#endif
const FLOATING delta_x = ddx * (__(voxel_error, x+1, y, z ) -
__(voxel_error, x-1, y, z ));
const FLOATING delta_y = ddy * (__(voxel_error, x, y+1, z ) -
__(voxel_error, x, y-1, z ));
const FLOATING delta_z = ddz * (__(voxel_error, x, y, z+1) -
__(voxel_error, x, y, z-1));
_(new_field, x, y, z, X) = _(old_field, x, y, z, X) + delta_x;
_(new_field, x, y, z, Y) = _(old_field, x, y, z, Y) + delta_y;
_(new_field, x, y, z, Z) = _(old_field, x, y, z, Z) + delta_z;
}
}
}
}
/*!
* \brief Find a displacement field that realises the given Jacobian.
*
* Employ a greedy search, starting from an initial guess of the
* displacement field (passed in the `field' argument). At each
* iteration, compute the Jacobian of the current displacement field,
* then correct the components of the field on each voxel according to
* the error on the neighbours. Each component is corrected
* independently, according to the two neighbours along that direction.
*
* Use two couples of buffers, to store a copy of the displacement field
* and its Jacobian at the current iteration, before and after the
* correction. If the correction improves the result, then switch the
* buffers and proceed with the next iteration, otherwise keep the
* current displacement field.
*/
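/*
 * An illustrative call (all tuning values below are made-up examples, not
 * recommendations; nx, ny, nz, J, mask, and field are assumed to be
 * allocated and initialised by the caller):
 *
 *     generate_displacement_greedy(nx, ny, nz,
 *                                  1.0f, 1.0f, 1.0f, // dx, dy, dz
 *                                  J, mask,
 *                                  1e-3f,            // epsilon
 *                                  0.2f,             // tolerance
 *                                  0.1f,             // eta
 *                                  0.4f,             // eta_max
 *                                  2.0f, 0.5f,       // alpha, beta
 *                                  0.1f,             // gamma
 *                                  0.2f, 10.0f,      // delta, zeta
 *                                  1e-6f,            // theta
 *                                  1e-9f,            // iota
 *                                  false,            // strict
 *                                  10000,            // it_max
 *                                  field);
 */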
void generate_displacement_greedy(
const size_t nx, /*!< Width of the image */
const size_t ny, /*!< Length of the image */
const size_t nz, /*!< Depth of the image */
const FLOATING dx, /*!< x spacing */
const FLOATING dy, /*!< y spacing */
const FLOATING dz, /*!< z spacing */
const FLOATING *J, /*!< Target Jacobian */
const bool *mask, /*!< Body mask */
const FLOATING epsilon, /*!< Tolerance on the Jacobian per voxel */
const FLOATING tolerance, /*!< Jacobian tolerance on background */
FLOATING eta, /*!< Initial step length for the optimisation */
const FLOATING eta_max, /*!< Maximum step length allowed */
const FLOATING alpha, /*!< Step length increase coefficient */
const FLOATING beta, /*!< Step length decrease coefficient */
const FLOATING gamma, /*!< Armijo-Goldstein parameter */
const FLOATING delta, /*!< Jacobian regularisation threshold */
const FLOATING zeta, /*!< Jacobian regularisation weight */
const FLOATING theta, /*!< Termination condition based on improvement */
const FLOATING iota, /*!< Termination condition based on eta */
const bool strict, /*!< Always improve maximum voxel error */
const size_t it_max, /*!< Maximum number of iterations */
FLOATING *field /*!< Resulting displacement field */
)
{
ASSERT_PARAMETERS;
disptools_clear_error();
// Image size
const size_t voxel_number = nx * ny * nz;
const size_t image_size = voxel_number * sizeof (FLOATING);
// Use two buffers that are swapped
unsigned old_buffer = 0, new_buffer = 1;
// Wrap arrays in data structures
Image J_ = {1, nx, ny, nz, dx, dy, dz, (FLOATING*) J};
Mask mask_ = {nx, ny, nz, (bool*) mask};
// Allocate memory for the Jacobian map of the moving field
// Use two buffers
Image J_field_[2] = {
new_image(3, nx, ny, nz, dx, dy, dz),
new_image(3, nx, ny, nz, dx, dy, dz),
};
// Allocate memory for the moving field
// Use two buffers
Image field_[2] = {
new_image(3, nx, ny, nz, dx, dy, dz),
new_image(3, nx, ny, nz, dx, dy, dz),
};
// Allocate memory for the voxel error term
Image voxel_error = new_image(1, nx, ny, nz, dx, dy, dz);
FLOATING last_error = DBL_MAX, error = DBL_MAX;
FLOATING max_voxel_error = 0.0;
if (disptools_has_error()) {
goto cleanup;
}
// Copy initial guess in the buffer
memcpy(field_[old_buffer].data, field, 3 * image_size);
// Compute the error of the initial guess
jacobian(field_[old_buffer], J_field_[old_buffer]);
last_error = compute_error(J_,
J_field_[old_buffer],
mask_,
tolerance,
voxel_error,
&max_voxel_error
);
size_t it;
for (it = 1; it <= it_max; ++it) {
// Update the moving displacement field
greedy_step(field_[old_buffer],
field_[new_buffer],
voxel_error,
eta
);
// Compute the Jacobian map of the moving displacement field
jacobian(field_[new_buffer], J_field_[new_buffer]);
// Compute the error of the moving field
error = compute_error(J_,
J_field_[new_buffer],
mask_,
tolerance,
voxel_error,
&max_voxel_error
);
// Verbose feedback
verbose_printf(true,
"Iteration %5ld: "
"total error %6e "
"max voxel error %6e "
"eta %6e\n",
it, error, max_voxel_error, eta);
// Stopping conditions
if (!isnormal(error)) {
verbose_printf(true, "Terminating: error exploded.\n");
break;
}
if (error >= last_error) {
// Try to reduce the step size
eta *= beta;
// Terminate if eta is too small
if (eta < iota) {
verbose_printf(true, "Error not decreasing, terminating.\n");
break;
}
// Otherwise, repeat the last iteration with the new eta
--it;
verbose_printf(true, "Error not decreasing, "
"reducing step size to %.4e\n", eta);
continue;
}
if (1.0 - error / last_error < theta) {
verbose_printf(true, "Terminating: relative improvement below threshold.\n");
break;
}
if (!isnormal(max_voxel_error)) {
verbose_printf(true, "Terminating: voxel error exploded.\n");
break;
}
if (max_voxel_error < epsilon) {
verbose_printf(true, "Terminating: reached desired tolerance.\n");
break;
}
// Save error and swap the buffers
last_error = error;
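// XOR_SWAP exchanges the two buffer indices (0 and 1) without a temporary.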
XOR_SWAP(old_buffer, new_buffer);
}
verbose_printf(it > it_max, "Terminating: reached maximum number of iterations.\n");
// Copy result for the caller
memcpy(field, field_[old_buffer].data, 3 * image_size);
cleanup:
// Release buffers
delete_image(&field_[0]);
delete_image(&field_[1]);
delete_image(&J_field_[0]);
delete_image(&J_field_[1]);
delete_image(&voxel_error);
}
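/* Minimal usage sketch (illustrative, not part of the original source): run
 * the greedy solver on a small volume with unit spacing and a uniform target
 * Jacobian of 1.0. The parameter values below are plausible placeholders,
 * not the library's defaults; FLOATING and the solver come from the headers
 * included above, and calloc/free require <stdlib.h>. */
static void example_generate_displacement_greedy(void)
{
const size_t nx = 8, ny = 8, nz = 8, nv = nx * ny * nz;
FLOATING *J = calloc(nv, sizeof (FLOATING)); // target Jacobian, one per voxel
bool *mask = calloc(nv, sizeof (bool)); // body mask
FLOATING *field = calloc(3 * nv, sizeof (FLOATING)); // zero initial guess
for (size_t i = 0; i < nv; ++i) { J[i] = 1.0; mask[i] = true; }
generate_displacement_greedy(nx, ny, nz, 1.0, 1.0, 1.0, J, mask,
/* epsilon */ 5e-3, /* tolerance */ 1e-2, /* eta */ 0.1, /* eta_max */ 0.4,
/* alpha */ 1.2, /* beta */ 0.5, /* gamma */ 0.1, /* delta */ 1e-3,
/* zeta */ 10.0, /* theta */ 1e-6, /* iota */ 1e-9,
/* strict */ false, /* it_max */ 1000, field);
free(J); free(mask); free(field);
}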
|
GB_unaryop__identity_int32_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int32_fp64
// op(A') function: GB_tran__identity_int32_fp64
// C type: int32_t
// A type: double
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int32_t z ; GB_CAST_SIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
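// Worked example (added for clarity): Ax [pA] = 3.75 casts to cij = 3;
// GB_CAST_SIGNED is used instead of a plain C cast so that NaN and values
// outside the int32_t range are handled safely.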
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int32_fp64
(
int32_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int32_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
drsdd.c | /*! @copyright (c) 2017 King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
*
* STARS-H is a software package, provided by King Abdullah
* University of Science and Technology (KAUST)
*
* @file src/backends/openmp/blrm/drsdd.c
* @version 1.3.0
* @author Aleksandr Mikhalev
* @date 2017-11-07
* */
#include "common.h"
#include "starsh.h"
int starsh_blrm__drsdd_omp(STARSH_blrm **matrix, STARSH_blrf *format,
int maxrank, double tol, int onfly)
//! Approximate each tile by randomized SVD.
/*!
* @param[out] matrix: Address of pointer to @ref STARSH_blrm object.
* @param[in] format: Block low-rank format.
* @param[in] maxrank: Maximum possible rank.
* @param[in] tol: Relative error tolerance.
* @param[in] onfly: Whether to skip storing dense blocks.
* @ingroup blrm
* */
{
STARSH_blrf *F = format;
STARSH_problem *P = F->problem;
STARSH_kernel *kernel = P->kernel;
STARSH_int nblocks_far = F->nblocks_far;
STARSH_int nblocks_near = F->nblocks_near;
// Shortcuts to information about clusters
STARSH_cluster *RC = F->row_cluster;
STARSH_cluster *CC = F->col_cluster;
void *RD = RC->data, *CD = CC->data;
// The following values default to those of the given block low-rank format
// F, but they are changed when false far-field blocks are present.
STARSH_int new_nblocks_far = nblocks_far;
STARSH_int new_nblocks_near = nblocks_near;
STARSH_int *block_far = F->block_far;
STARSH_int *block_near = F->block_near;
// Places to store low-rank factors, dense blocks and ranks
Array **far_U = NULL, **far_V = NULL, **near_D = NULL;
int *far_rank = NULL;
double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL;
size_t offset_U = 0, offset_V = 0, offset_D = 0;
STARSH_int bi, bj = 0;
double drsdd_time = 0, kernel_time = 0;
int BAD_TILE = 0;
const int oversample = starsh_params.oversample;
// Init buffers to store low-rank factors of far-field blocks if needed
if(nblocks_far > 0)
{
STARSH_MALLOC(far_U, nblocks_far);
STARSH_MALLOC(far_V, nblocks_far);
STARSH_MALLOC(far_rank, nblocks_far);
size_t size_U = 0, size_V = 0;
// Simple cycle over all far-field blocks
for(bi = 0; bi < nblocks_far; bi++)
{
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Get the sizes of the corresponding block row and block column
size_U += RC->size[i];
size_V += CC->size[j];
}
size_U *= maxrank;
size_V *= maxrank;
STARSH_MALLOC(alloc_U, size_U);
STARSH_MALLOC(alloc_V, size_V);
for(bi = 0; bi < nblocks_far; bi++)
{
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Get the sizes of the corresponding block row and block column
size_t nrows = RC->size[i], ncols = CC->size[j];
int shape_U[] = {nrows, maxrank};
int shape_V[] = {ncols, maxrank};
double *U = alloc_U+offset_U, *V = alloc_V+offset_V;
offset_U += nrows*maxrank;
offset_V += ncols*maxrank;
array_from_buffer(far_U+bi, 2, shape_U, 'd', 'F', U);
array_from_buffer(far_V+bi, 2, shape_V, 'd', 'F', V);
}
offset_U = 0;
offset_V = 0;
}
// Work variables
int info;
// Simple cycle over all far-field admissible blocks
#pragma omp parallel for schedule(dynamic,1)
for(bi = 0; bi < nblocks_far; bi++)
{
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Get the corresponding sizes and their minimum
int nrows = RC->size[i];
int ncols = CC->size[j];
if(nrows != ncols && BAD_TILE == 0)
{
#pragma omp critical
BAD_TILE = 1;
STARSH_WARNING("This was only tested on square tiles, error of "
"approximation may be much higher, than demanded");
}
int mn = nrows < ncols ? nrows : ncols;
int mn2 = maxrank+oversample;
if(mn2 > mn)
mn2 = mn;
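// The randomized SVD samples maxrank+oversample columns, clamped to the
// smaller tile dimension.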
// Get size of temporary arrays
int lwork = ncols, lwork_sdd = (4*mn2+7)*mn2;
if(lwork_sdd > lwork)
lwork = lwork_sdd;
lwork += (size_t)mn2*(2*ncols+nrows+mn2+1);
int liwork = 8*mn2;
double *D, *work;
int *iwork;
int info;
// Allocate temporary arrays
STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);
STARSH_PMALLOC(iwork, liwork, info);
STARSH_PMALLOC(work, lwork, info);
// Compute elements of a block
double time0 = omp_get_wtime();
kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],
RD, CD, D, nrows);
double time1 = omp_get_wtime();
starsh_dense_dlrrsdd(nrows, ncols, D, nrows, far_U[bi]->data, nrows,
far_V[bi]->data, ncols, far_rank+bi, maxrank, oversample, tol,
work, lwork, iwork);
double time2 = omp_get_wtime();
#pragma omp critical
{
drsdd_time += time2-time1;
kernel_time += time1-time0;
}
// Free temporary arrays
free(D);
free(work);
free(iwork);
}
// Get number of false far-field blocks
STARSH_int nblocks_false_far = 0;
STARSH_int *false_far = NULL;
for(bi = 0; bi < nblocks_far; bi++)
if(far_rank[bi] == -1)
nblocks_false_far++;
if(nblocks_false_far > 0)
{
// IMPORTANT: `false_far` must be in ascending order for later code
// to work normally
STARSH_MALLOC(false_far, nblocks_false_far);
bj = 0;
for(bi = 0; bi < nblocks_far; bi++)
if(far_rank[bi] == -1)
false_far[bj++] = bi;
}
// Update lists of far-field and near-field blocks using previously
// generated list of false far-field blocks
if(nblocks_false_far > 0)
{
// Update list of near-field blocks
new_nblocks_near = nblocks_near+nblocks_false_far;
STARSH_MALLOC(block_near, 2*new_nblocks_near);
// First copy over all near-field blocks, which are assumed to be dense
#pragma omp parallel for schedule(static)
for(bi = 0; bi < 2*nblocks_near; bi++)
block_near[bi] = F->block_near[bi];
// Add false far-field blocks
#pragma omp parallel for schedule(static)
for(bi = 0; bi < nblocks_false_far; bi++)
{
STARSH_int bj = false_far[bi];
block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];
block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];
}
// Update list of far-field blocks
new_nblocks_far = nblocks_far-nblocks_false_far;
if(new_nblocks_far > 0)
{
STARSH_MALLOC(block_far, 2*new_nblocks_far);
bj = 0;
for(bi = 0; bi < nblocks_far; bi++)
{
// `false_far` must be in ascending order for this to work
if(bj < nblocks_false_far && false_far[bj] == bi)
{
bj++;
}
else
{
block_far[2*(bi-bj)] = F->block_far[2*bi];
block_far[2*(bi-bj)+1] = F->block_far[2*bi+1];
}
}
}
// Update format by creating new format
STARSH_blrf *F2;
info = starsh_blrf_new_from_coo(&F2, P, F->symm, RC, CC,
new_nblocks_far, block_far, new_nblocks_near, block_near,
F->type);
// Swap internal data of formats and free unnecessary data
STARSH_blrf tmp_blrf = *F;
*F = *F2;
*F2 = tmp_blrf;
STARSH_WARNING("`F` was modified due to false far-field blocks");
starsh_blrf_free(F2);
}
// Compute near-field blocks if needed
if(onfly == 0 && new_nblocks_near > 0)
{
STARSH_MALLOC(near_D, new_nblocks_near);
size_t size_D = 0;
// Simple cycle over all near-field blocks
for(bi = 0; bi < new_nblocks_near; bi++)
{
// Get indexes of corresponding block row and block column
STARSH_int i = block_near[2*bi];
STARSH_int j = block_near[2*bi+1];
// Get the sizes of the corresponding block row and block column
size_t nrows = RC->size[i];
size_t ncols = CC->size[j];
// Update size_D
size_D += nrows*ncols;
}
STARSH_MALLOC(alloc_D, size_D);
// For each near-field block compute its elements
#pragma omp parallel for schedule(dynamic,1)
for(bi = 0; bi < new_nblocks_near; bi++)
{
// Get indexes of corresponding block row and block column
STARSH_int i = block_near[2*bi];
STARSH_int j = block_near[2*bi+1];
// Get the sizes of the corresponding block row and block column
int nrows = RC->size[i];
int ncols = CC->size[j];
int shape[2] = {nrows, ncols};
double *D;
#pragma omp critical
{
D = alloc_D+offset_D;
array_from_buffer(near_D+bi, 2, shape, 'd', 'F', D);
offset_D += near_D[bi]->size;
}
double time0 = omp_get_wtime();
kernel(nrows, ncols, RC->pivot+RC->start[i],
CC->pivot+CC->start[j], RD, CD, D, nrows);
double time1 = omp_get_wtime();
#pragma omp critical
kernel_time += time1-time0;
}
}
// Change sizes of far_rank, far_U and far_V if there were false
// far-field blocks
if(nblocks_false_far > 0 && new_nblocks_far > 0)
{
bj = 0;
for(bi = 0; bi < nblocks_far; bi++)
{
if(far_rank[bi] == -1)
bj++;
else
{
int shape_U[2] = {far_U[bi]->shape[0], far_rank[bi]};
int shape_V[2] = {far_V[bi]->shape[0], far_rank[bi]};
array_from_buffer(far_U+bi-bj, 2, shape_U, 'd', 'F',
far_U[bi]->data);
array_from_buffer(far_V+bi-bj, 2, shape_V, 'd', 'F',
far_V[bi]->data);
far_rank[bi-bj] = far_rank[bi];
}
}
STARSH_REALLOC(far_rank, new_nblocks_far);
STARSH_REALLOC(far_U, new_nblocks_far);
STARSH_REALLOC(far_V, new_nblocks_far);
//STARSH_REALLOC(alloc_U, offset_U);
//STARSH_REALLOC(alloc_V, offset_V);
}
// If all far-field blocks are false, then dealloc buffers
if(new_nblocks_far == 0 && nblocks_far > 0)
{
block_far = NULL;
free(far_rank);
far_rank = NULL;
free(far_U);
far_U = NULL;
free(far_V);
far_V = NULL;
free(alloc_U);
alloc_U = NULL;
free(alloc_V);
alloc_V = NULL;
}
// Dealloc list of false far-field blocks if it is not empty
if(nblocks_false_far > 0)
free(false_far);
// Finish by creating an instance of the block low-rank matrix with the
// given buffers
//STARSH_WARNING("DRSDD kernel total time: %e secs", drsdd_time);
//STARSH_WARNING("MATRIX kernel total time: %e secs", kernel_time);
return starsh_blrm_new(matrix, F, far_rank, far_U, far_V, onfly, near_D,
alloc_U, alloc_V, alloc_D, '1');
}
|
rndgen.c | // Generates a sequence of N random double precision values.
// For each value x, isfinite(x) == true holds, but x is otherwise arbitrary.
#include "common.h"
int main(int argc, char *argv[])
{
if (3 != argc) {
(void)fprintf(stderr, "%s N FileName\n", argv[0]);
return EXIT_FAILURE;
}
const size_t n = atoz(argv[1]);
if (!n) {
perror("atoz");
return EXIT_FAILURE;
}
FILE *const r = fopen("/dev/urandom", "rb");
if (!r) {
perror("fopen(/dev/urandom)");
return EXIT_FAILURE;
}
FILE *const f = ((argv[2] && *(argv[2])) ? fopen(argv[2], "wb") : (FILE*)NULL);
if (!f) {
perror("fopen");
return EXIT_FAILURE;
}
double *const d = (double*)calloc(n, sizeof(double));
if (!d) {
perror("calloc");
return EXIT_FAILURE;
}
(void)fprintf(stdout, "reading %zu bytes from /dev/urandom to memory... ", (n * sizeof(double)));
(void)fflush(stdout);
if (n != fread(d, sizeof(double), n, r)) {
perror("fread");
return EXIT_FAILURE;
}
(void)fprintf(stdout, "done\n");
(void)fflush(stdout);
(void)fprintf(stdout, "checking for NaNs and infinities with %d threads... ", omp_get_max_threads());
(void)fflush(stdout);
size_t j = 0u;
bool k = true;
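// Replace each non-finite value by re-reading from /dev/urandom until it is
// finite. The critical section serializes access to the shared stream r and
// to the shared counters j (number of replacements) and k (no I/O error yet).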
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,d,r,j,k)
#endif /* _OPENMP */
for (size_t i = (size_t)0u; i < n; ++i) {
while (!isfinite(d[i]) && k) {
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
if (k) {
if ((size_t)1u != fread(d + i, sizeof(double), (size_t)1u, r)) {
k = false;
perror("fread");
}
else
++j;
}
}
}
(void)fprintf(stdout, "%zu found and replaced\n", j);
(void)fflush(stdout);
if (!k)
return EXIT_FAILURE;
(void)fprintf(stdout, "writing %s file... ", argv[2]);
(void)fflush(stdout);
if (n != fwrite(d, sizeof(double), n, f))
perror("fwrite");
else if (fclose(f))
perror("fclose");
else if (fclose(r))
perror("fclose(/dev/urandom)");
else {
(void)fprintf(stdout, "done\n");
(void)fflush(stdout);
return EXIT_SUCCESS;
}
return EXIT_FAILURE;
}
|
MaxLut.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <array>
#include <vector>
#include "bb/BinaryLutModel.h"
namespace bb {
// LUT-style popcount
template <typename BinType = float, typename RealType = float>
class MaxLut : public BinaryLutModel
{
using _super = BinaryLutModel;
public:
static inline std::string ClassName(void) { return "MaxLut"; }
static inline std::string ObjectName(void){ return ClassName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); }
std::string GetModelName(void) const override { return ClassName(); }
std::string GetObjectName(void) const override { return ObjectName(); }
protected:
bool m_host_only = false;
bool m_binarize_input = false;
bool m_binarize_output = true;
std::string m_connection;
int m_n = 6;
indices_t m_input_shape;
indices_t m_output_shape;
Tensor_<std::int32_t> m_input_index;
std::mt19937_64 m_mt;
public:
struct create_t
{
int n = 6;
indices_t output_shape;
std::string connection = "";
bool binarize_input = false;
bool binarize_output = true;
std::uint64_t seed = 1;
};
protected:
MaxLut(create_t const &create)
{
BB_ASSERT(!create.output_shape.empty());
m_mt.seed(create.seed);
m_n = create.n;
m_output_shape = create.output_shape;
m_connection = create.connection;
m_input_index.Resize(CalcShapeSize(m_output_shape), (index_t)m_n);
}
void CommandProc(std::vector<std::string> args) override
{
// Set host-only mode
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
// Set binary mode
if ( args.size() == 2 && args[0] == "binary" )
{
m_binarize_input = EvalBool(args[1]);
m_binarize_output = EvalBool(args[1]);
}
if ( args.size() == 2 && args[0] == "binarize_input" )
{
m_binarize_input = EvalBool(args[1]);
}
if ( args.size() == 2 && args[0] == "binarize_output" )
{
m_binarize_output = EvalBool(args[1]);
}
}
public:
~MaxLut() {}
static std::shared_ptr<MaxLut> Create(create_t const &create)
{
return std::shared_ptr<MaxLut>(new MaxLut(create));
}
static std::shared_ptr<MaxLut> Create(int n, indices_t const &output_shape, std::string connection = "", bool binarize = true, bool binarize_input = false, std::uint64_t seed = 1)
{
create_t create;
create.n = n;
create.output_shape = output_shape;
create.connection = connection;
create.binarize_input = binarize_input;
create.binarize_output = binarize;
create.seed = seed;
return Create(create);
}
static std::shared_ptr<MaxLut> Create(int n, index_t output_node_size, std::string connection = "", bool binarize = true, bool binarize_input = false, std::uint64_t seed = 1)
{
create_t create;
create.n = n;
create.output_shape.resize(1);
create.output_shape[0] = output_node_size;
create.connection = connection;
create.binarize_input = binarize_input;
create.binarize_output = binarize;
create.seed = seed;
return Create(create);
}
static std::shared_ptr<MaxLut> Create(void)
{
return Create(create_t());
}
#ifdef BB_PYBIND11 // for Python
static std::shared_ptr<MaxLut> CreatePy(
int n,
indices_t output_shape,
std::string connection="",
bool binarize = true,
bool binarize_input = false,
std::uint64_t seed = 1)
{
create_t create;
create.n = n;
create.output_shape = output_shape;
create.connection = connection;
create.binarize_input = binarize_input;
create.binarize_output = binarize;
create.seed = seed;
return Create(create);
}
#endif
auto lock_InputIndex(void) { return m_input_index.Lock(); }
auto lock_InputIndex_const(void) const { return m_input_index.LockConst(); }
// Management of the sparse connections
index_t GetNodeConnectionSize(index_t node) const override
{
return m_n;
}
void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node) override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < m_n);
BB_DEBUG_ASSERT(input_node >= 0 && input_node < GetInputNodeSize());
auto ptr = lock_InputIndex();
ptr(node, input_index) = (std::int32_t)input_node;
}
index_t GetNodeConnectionIndex(index_t node, index_t input_index) const override
{
BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape));
BB_ASSERT(input_index >= 0 && input_index < m_n);
auto ptr = lock_InputIndex_const();
return (index_t)ptr(node, input_index);
}
// Definition of the LUT operations
int GetLutTableSize(index_t node) const
{
return (1 << m_n);
}
void SetLutTable(index_t node, int bitpos, bool value) override
{
}
bool GetLutTable(index_t node, int bitpos) const override
{
int count = 0;
for ( int i = 0; i < m_n; ++i ) {
count += (bitpos & 1) ? +1 : -1;
bitpos >>= 1;
}
return count > 0;
}
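// Worked example (illustrative): with m_n = 6 and bitpos = 0b110110, four
// bits are set and two are clear, so count = 4 - 2 = 2 > 0 and the entry is
// true; the fixed table therefore encodes a majority vote over the inputs.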
/**
* @brief Set the input shape
* @detail Sets the shape of the input and reinitializes the connections
* @param shape the new shape
* @return the resulting output shape
*/
indices_t SetInputShape(indices_t shape) override
{
// Do nothing if the shape is already set
if ( shape == this->GetInputShape() ) {
return this->GetOutputShape();
}
// Set the shape
m_input_shape = shape;
// Initialize the connections
this->InitializeNodeInput(m_mt(), m_connection);
return m_output_shape;
}
/**
* @brief Set the output shape
* @detail Sets the shape of the output; any shape is allowed as long
* as the number of output nodes does not change
* @param shape the new shape
* @return none
*/
void SetOutputShape(indices_t const &shape)
{
BB_ASSERT(CalcShapeSize(shape) == this->m_output_node_size);
m_output_shape = shape;
}
/**
* @brief Get the input shape
* @detail Retrieves the input shape
* @return the input shape
*/
indices_t GetInputShape(void) const override
{
return m_input_shape;
}
/**
* @brief Get the output shape
* @detail Retrieves the output shape
* @return the output shape
*/
indices_t GetOutputShape(void) const override
{
return m_output_shape;
}
public:
FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
{
BB_ASSERT(x_buf.GetType() == DataType<BinType>::type);
// If SetInputShape has not been called yet, set it on the first call
if (x_buf.GetShape() != m_input_shape) {
SetInputShape(x_buf.GetShape());
}
if (train) {
this->PushFrameBuffer(x_buf);
}
// Set up the output
FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<BinType>::type);
#ifdef BB_WITH_CUDA
if ( DataType<BinType>::type == BB_TYPE_FP32 && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_MaxLut_Forward<float>
(
(float const *)x_ptr.GetAddr(),
(float *)y_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(float)),
(bool )m_binarize_input,
(bool )m_binarize_output
);
return y_buf;
}
if ( DataType<BinType>::type == BB_TYPE_BIT && !m_host_only
&& x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto y_ptr = y_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_bit_MaxLut_Forward
(
(int const *)x_ptr.GetAddr(),
(int *)y_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )y_buf.GetNodeSize(),
(int )y_buf.GetFrameSize(),
(int )(y_buf.GetFrameStride() / sizeof(int))
);
return y_buf;
}
#endif
{
// Generic (CPU) implementation
auto x_ptr = x_buf.LockConst<BinType>();
auto y_ptr = y_buf.Lock<BinType>();
auto input_index_ptr = m_input_index.LockConst();
index_t frame_size = x_buf.GetFrameSize();
index_t node_size = this->GetOutputNodeSize();
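// For each output node, take the maximum over its m_n connected inputs
// (and optionally binarize the result).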
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
for (index_t frame = 0; frame < frame_size; ++frame) {
BinType max_val = (BinType)BB_BINARY_LO;
for (index_t i = 0; i < m_n; i++) {
index_t input_node = input_index_ptr(node, i);
BinType val = (RealType)x_ptr.Get(frame, input_node);
if (val > max_val) {
max_val = val;
}
}
if (m_binarize_output) {
max_val = (max_val > (BinType)0) ? (BinType)BB_BINARY_HI : (BinType)BB_BINARY_LO;
}
y_ptr.Set(frame, node, max_val);
}
}
return y_buf;
}
}
// Backward
FrameBuffer Backward(FrameBuffer dy_buf) override
{
if (dy_buf.Empty()) {
return dy_buf;
}
BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type);
FrameBuffer x_buf = this->PopFrameBuffer();
BB_ASSERT(x_buf.GetType() == DataType<BinType>::type);
// Set up the output
FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<RealType>::type);
#ifdef BB_WITH_CUDA
if ( DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto dy_ptr = dy_buf.LockDeviceMemoryConst();
auto dx_ptr = dx_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_MaxLut_Backward<float>
(
(float const *)x_ptr.GetAddr(),
(float const *)dy_ptr.GetAddr(),
(float *)dx_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )dx_buf.GetNodeSize(),
(int )dy_buf.GetNodeSize(),
(int )dy_buf.GetFrameSize(),
(int )(dy_buf.GetFrameStride() / sizeof(float)),
(bool )m_binarize_input
);
return dx_buf;
}
if ( DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only
&& dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
auto x_ptr = x_buf.LockDeviceMemoryConst();
auto dy_ptr = dy_buf.LockDeviceMemoryConst();
auto dx_ptr = dx_buf.LockDeviceMemory(true);
auto input_index_ptr = m_input_index.LockDeviceMemoryConst();
bbcu_bit_MaxLut_Backward<float>
(
(int const *)x_ptr.GetAddr(),
(float const *)dy_ptr.GetAddr(),
(float *)dx_ptr.GetAddr(),
(int const *)input_index_ptr.GetAddr(),
(int )m_n,
(int )dx_buf.GetNodeSize(),
(int )dy_buf.GetNodeSize(),
(int )dy_buf.GetFrameSize(),
(int )(x_buf.GetFrameStride() / sizeof(int)),
(int )(dy_buf.GetFrameStride() / sizeof(float))
);
return dx_buf;
}
#endif
{
// Generic (CPU) implementation
dx_buf.FillZero();
auto x_ptr = x_buf.LockConst<BinType>();
auto dy_ptr = dy_buf.LockConst<RealType>();
auto dx_ptr = dx_buf.Lock<RealType>();
auto input_index_ptr = m_input_index.LockConst();
index_t frame_size = dy_buf.GetFrameSize();
index_t node_size = this->GetOutputNodeSize();
#pragma omp parallel for
for (index_t node = 0; node < node_size; ++node) {
for (index_t frame = 0; frame < frame_size; ++frame) {
BinType max_val = (BinType)BB_BINARY_LO;
index_t max_idx = 0;
for (index_t i = 0; i < m_n; i++) {
index_t input_node = input_index_ptr(node, i);
BinType val = (RealType)x_ptr.Get(frame, input_node);
if (val > max_val) {
max_val = val;
max_idx = i;
}
}
auto dx = dy_ptr.Get(frame, node);
index_t input_node = input_index_ptr(node, max_idx);
dx_ptr.Add(frame, input_node, dx);
}
}
return dx_buf;
}
}
// Serialization
protected:
void DumpObjectData(std::ostream &os) const override
{
// Version
std::int64_t ver = 1;
bb::SaveValue(os, ver);
// Parent class
_super::DumpObjectData(os);
// Members
bb::SaveValue(os, m_n);
bb::SaveValue(os, m_host_only);
bb::SaveValue(os, m_connection);
bb::SaveValue(os, m_input_shape);
bb::SaveValue(os, m_output_shape);
bb::SaveValue(os, m_binarize_input);
bb::SaveValue(os, m_binarize_output);
m_input_index.DumpObject(os);
}
void LoadObjectData(std::istream &is) override
{
// Version
std::int64_t ver;
bb::LoadValue(is, ver);
BB_ASSERT(ver == 1);
// Parent class
_super::LoadObjectData(is);
// Members
bb::LoadValue(is, m_n);
bb::LoadValue(is, m_host_only);
bb::LoadValue(is, m_connection);
bb::LoadValue(is, m_input_shape);
bb::LoadValue(is, m_output_shape);
bb::LoadValue(is, m_binarize_input);
bb::LoadValue(is, m_binarize_output);
m_input_index.LoadObject(is);
}
};
} |
GB_add_phase0.c | //------------------------------------------------------------------------------
// GB_add_phase0: find vectors of C to compute for C=A+B or C<M>=A+B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// The eWise add of two matrices, C=A+B, C<M>=A+B, or C<!M>=A+B starts with
// this phase, which determines which vectors of C need to be computed.
// This phase is also used for GB_masker.
// On input, A and B are the two matrices being added, and M is the optional
// mask matrix (not complemented). The complemented mask is handed in GB_mask,
// not here.
// The A matrix can be sparse, hypersparse, slice, or hyperslice. The B matrix
// can only be sparse or hypersparse. See GB_wait, which can pass in A as any
// of the four formats. In this case, no mask is present.
// On output, an integer (Cnvec), a boolean (Ch_is_Mh), and up to 3 arrays are
// returned, either NULL or of size Cnvec. Let n = A->vdim be the vector
// dimension of A, B, M, and C.
// Ch: the list of vectors to compute. If not NULL, Ch [k] = j is the
// kth vector in C to compute, which will become the hyperlist C->h of C.
// Note that some of these vectors may turn out to be empty, because of
// the mask, or because the vector j appeared in A or B, but is empty.
// It is pruned at the end of GB_add_phase2. If Ch is NULL then it is an
// implicit list of size n, and Ch [k] == k for all k = 0:n-1. In this
// case, C will be a standard matrix, not hypersparse. Thus, the kth
// vector is j = (Ch == NULL) ? k : Ch [k].
// Ch is freed by GB_add if phase1 fails. phase2 either frees it or
// transplants it into C.
// Ch_is_Mh: true if the mask M is present, hypersparse, and not
// complemented, false otherwise. In this case Ch is a deep copy of Mh.
// Only GB_add uses this option; it is not used by GB_masker (Ch_is_Mh
// is always false for GB_masker). This is determined by passing in
// p_Ch_is_Mh as a NULL or non-NULL pointer.
// C_to_A: if A is hypersparse, then C_to_A [k] = kA if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in A, as j = Ah [kA]. If j does
// not appear in A, then C_to_A [k] = -1. If A is not hypersparse, then
// C_to_A is returned as NULL.
// C_to_B: if B is hypersparse, then C_to_B [k] = kB if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in B, as j = Bh [kB]. If j does
// not appear in B, then C_to_B [k] = -1. If B is not hypersparse, then
// C_to_B is returned as NULL.
// C_to_M: if M is hypersparse, and Ch_is_Mh is false, then C_to_M [k] =
// kM if the kth vector, j = (Ch == NULL) ? k : Ch [k] appears in M, as j
// = Mh [kM]. If j does not appear in M, then C_to_M [k] = -1. If M is
// not hypersparse, then C_to_M is returned as NULL.
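// Small worked example (added for clarity): with n = 6, Ah = {0,3,5}, and
// Bh = {2,3}, the set union gives Ch = {0,2,3,5}, C_to_A = {0,-1,1,2}, and
// C_to_B = {-1,0,1,-1}: vector 0 appears only in A, 2 only in B, 3 in both,
// and 5 only in A.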
#include "GB_add.h"
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (kA_start, ntasks+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (kB_start, ntasks+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (kC_start, ntasks+1, sizeof (int64_t)) ; \
}
//------------------------------------------------------------------------------
// GB_allocate_result
//------------------------------------------------------------------------------
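// Allocate each requested array, or free everything and return false if any
// allocation fails, so the caller gets either all of the results or none.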
static inline bool GB_allocate_result
(
int64_t Cnvec,
int64_t *GB_RESTRICT *Ch_handle,
int64_t *GB_RESTRICT *C_to_M_handle,
int64_t *GB_RESTRICT *C_to_A_handle,
int64_t *GB_RESTRICT *C_to_B_handle
)
{
bool ok = true ;
if (Ch_handle != NULL)
{
GB_MALLOC_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ;
ok = (*Ch_handle != NULL) ;
}
if (C_to_M_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_M_handle != NULL) ;
}
if (C_to_A_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_A_handle != NULL) ;
}
if (C_to_B_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_B_handle != NULL) ;
}
if (!ok)
{
// out of memory
if (Ch_handle != NULL)
{
GB_FREE_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_M_handle != NULL)
{
GB_FREE_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_A_handle != NULL)
{
GB_FREE_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_B_handle != NULL)
{
GB_FREE_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ;
}
}
return (ok) ;
}
//------------------------------------------------------------------------------
// GB_add_phase0: find the vectors of C for C<M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB_add_phase0 // find vectors in C for C=A+B or C<M>=A+B
(
int64_t *p_Cnvec, // # of vectors to compute in C
int64_t *GB_RESTRICT *Ch_handle, // Ch: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_M_handle, // C_to_M: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_A_handle, // C_to_A: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_B_handle, // C_to_B: of size Cnvec, or NULL
bool *p_Ch_is_Mh, // if true, then Ch == Mh
const GrB_Matrix M, // optional mask, may be NULL; not complemented
const GrB_Matrix A, // standard, hypersparse, slice, or hyperslice
const GrB_Matrix B, // standard or hypersparse; never a slice
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Cnvec != NULL) ;
ASSERT (Ch_handle != NULL) ;
ASSERT (C_to_A_handle != NULL) ;
ASSERT (C_to_B_handle != NULL) ;
ASSERT_MATRIX_OK (A, "A for add phase0", GB0) ;
ASSERT_MATRIX_OK (B, "B for add phase0", GB0) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for add phase0", GB0) ;
ASSERT (A->vdim == B->vdim) ;
ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ;
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
int64_t *GB_RESTRICT Ch = NULL ;
int64_t *GB_RESTRICT C_to_M = NULL ;
int64_t *GB_RESTRICT C_to_A = NULL ;
int64_t *GB_RESTRICT C_to_B = NULL ;
(*Ch_handle) = NULL ;
(*C_to_A_handle) = NULL ;
(*C_to_B_handle) = NULL ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = NULL ;
}
int64_t *GB_RESTRICT kA_start = NULL ;
int64_t *GB_RESTRICT kB_start = NULL ;
int64_t *GB_RESTRICT kC_start = NULL ;
int ntasks = 0 ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = 1 ; // nthreads depends on Cnvec, computed below
//--------------------------------------------------------------------------
// get content of M, A, and B
//--------------------------------------------------------------------------
int64_t Cnvec ;
int64_t n = A->vdim ;
int64_t Anvec = A->nvec ;
bool A_is_hyper = A->is_hyper ;
bool A_is_slice = A->is_slice ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = (A_is_hyper) ? A->h : NULL ;
const int64_t A_hfirst = A->hfirst ;
#define GB_Ah(k) (A_is_hyper ? Ah [k] : (A_hfirst + (k)))
int64_t Bnvec = B->nvec ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bh = B->h ;
bool B_is_hyper = B->is_hyper ;
ASSERT (!B->is_slice) ;
int64_t Mnvec = 0 ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mh = NULL ;
bool M_is_hyper = false ;
if (M != NULL)
{
Mnvec = M->nvec ;
Mp = M->p ;
Mh = M->h ;
M_is_hyper = M->is_hyper ;
ASSERT (!M->is_slice) ;
}
// For GB_add, if M is present, hypersparse, and not complemented, then C
// will be hypersparse, and it will have the same set of vectors as M (Ch == Mh).
// For GB_masker, Ch is never equal to Mh.
bool Ch_is_Mh = (p_Ch_is_Mh != NULL) && (M != NULL && M_is_hyper) ;
//--------------------------------------------------------------------------
// find the set union of the non-empty vectors of A and B
//--------------------------------------------------------------------------
if (Ch_is_Mh)
{
//----------------------------------------------------------------------
// C is hypersparse, with the same vectors as the hypersparse M
//----------------------------------------------------------------------
// This step is done for GB_add only, not GB_masker.
// GB_wait is the only place where A may be a slice, and it does not
// use a mask. So this phase can ignore the case where A is a slice.
Cnvec = Mnvec ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
ASSERT (!A_is_slice) ;
if (!GB_allocate_result (Cnvec, &Ch, NULL,
(A_is_hyper) ? (&C_to_A) : NULL, (B_is_hyper) ? (&C_to_B) : NULL))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
// copy Mh into Ch. Ch is Mh so C_to_M is not needed.
GB_memcpy (Ch, Mh, Mnvec * sizeof (int64_t), nthreads) ;
// construct the mapping from C to A and B, if they are hypersparse
if (A_is_hyper || B_is_hyper)
{
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
if (A_is_hyper)
{
// C_to_A [k] = kA if Ah [kA] == j and A(:,j) is non-empty
int64_t kA = 0, pA, pA_end ;
GB_lookup (true, Ah, Ap, &kA, Anvec-1, j, &pA, &pA_end) ;
C_to_A [k] = (pA < pA_end) ? kA : -1 ;
}
if (B_is_hyper)
{
// C_to_B [k] = kB if Bh [kB] == j and B(:,j) is non-empty
int64_t kB = 0, pB, pB_end ;
GB_lookup (true, Bh, Bp, &kB, Bnvec-1, j, &pB, &pB_end) ;
C_to_B [k] = (pB < pB_end) ? kB : -1 ;
}
}
}
}
else if ((A_is_hyper || A_is_slice) && B_is_hyper)
{
//----------------------------------------------------------------------
// A is hypersparse or a hyperslice, and B is hypersparse
//----------------------------------------------------------------------
// Ch is the set union of Ah and Bh. This is handled with a parallel
// merge, since Ah and Bh are both sorted lists.
//----------------------------------------------------------------------
// phase 0: create the tasks
//----------------------------------------------------------------------
double work = GB_IMIN (Anvec + Bnvec, n) ;
nthreads = GB_nthreads (work, chunk, nthreads_max) ;
ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
ntasks = GB_IMIN (ntasks, work) ;
// allocate workspace
GB_MALLOC_MEMORY (kA_start, ntasks+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (kB_start, ntasks+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (kC_start, ntasks+1, sizeof (int64_t)) ;
if (kA_start == NULL || kB_start == NULL || kC_start == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
kA_start [0] = (Anvec == 0) ? -1 : 0 ;
kB_start [0] = (Bnvec == 0) ? -1 : 0 ;
kA_start [ntasks] = (Anvec == 0) ? -1 : Anvec ;
kB_start [ntasks] = (Bnvec == 0) ? -1 : Bnvec ;
for (int taskid = 1 ; taskid < ntasks ; taskid++)
{
// create tasks: A and B are both hyper
double target_work = ((ntasks-taskid) * work) / ntasks ;
GB_slice_vector (NULL, NULL,
&(kA_start [taskid]), &(kB_start [taskid]),
0, 0, NULL, // Mi not present
0, Anvec, Ah, A_hfirst, // Ah, explicit or implicit list
0, Bnvec, Bh, // Bh, explicit list
n, // Ah and Bh have dimension n
target_work) ;
}
//----------------------------------------------------------------------
// phase 1: count the entries in the result of each task
//----------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
int64_t kC = 0 ;
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// jA appears in A but not B
kA++ ;
}
else if (jB < jA)
{
// jB appears in B but not A
kB++ ;
}
else
{
// j = jA = jB appears in both A and B
kA++ ;
kB++ ;
}
}
kC_start [taskid] = kC + (kA_end - kA) + (kB_end - kB) ;
}
//----------------------------------------------------------------------
// phase 1b: cumulative sum of entries for each task
//----------------------------------------------------------------------
GB_cumsum (kC_start, ntasks, NULL, 1) ;
Cnvec = kC_start [ntasks] ;
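// After the cumulative sum, kC_start [taskid] gives the position in Ch at
// which each task starts writing, and the last entry is the total Cnvec.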
//----------------------------------------------------------------------
// allocate the result
//----------------------------------------------------------------------
// C will be hypersparse, so Ch is allocated. The mask M is ignored
// for computing Ch. Ch is the set union of Ah and Bh.
if (!GB_allocate_result (Cnvec, &Ch,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, &C_to_B))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// phase 2: compute the result
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kC = kC_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
// merge Ah and Bh into Ch
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = -1 ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
Ch [kC] = jB ;
C_to_A [kC] = -1 ; // jB does not appear in A
C_to_B [kC] = kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = kB++ ;
}
}
if (kA < kA_end)
{
// B is exhausted but A is not
for ( ; kA < kA_end ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
Ch [kC] = jA ;
C_to_A [kC] = kA ;
C_to_B [kC] = -1 ;
}
}
else if (kB < kB_end)
{
// A is exhausted but B is not
for ( ; kB < kB_end ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
Ch [kC] = jB ;
C_to_A [kC] = -1 ;
C_to_B [kC] = kB ;
}
}
ASSERT (kC == kC_start [taskid+1]) ;
}
//----------------------------------------------------------------------
// check result via a sequential merge
//----------------------------------------------------------------------
#ifdef GB_DEBUG
// merge Ah and Bh into Ch
int64_t kA = 0 ;
int64_t kB = 0 ;
int64_t kC = 0 ;
for ( ; kA < Anvec && kB < Bnvec ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == -1) ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ; // jB does not appear in A
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
}
if (kA < Anvec)
{
// B is exhausted but A is not
for ( ; kA < Anvec ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ;
ASSERT (C_to_B [kC] == -1) ;
}
}
else if (kB < Bnvec)
{
// A is exhausted but B is not
for ( ; kB < Bnvec ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ;
ASSERT (C_to_B [kC] == kB) ;
}
}
ASSERT (kC == Cnvec) ;
#endif
}
else if ((A_is_hyper || A_is_slice) && !B_is_hyper)
{
//----------------------------------------------------------------------
// A is hypersparse, B is standard
//----------------------------------------------------------------------
// C will be standard. Construct the C_to_A mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, NULL))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < n ; j++)
{
C_to_A [j] = -1 ;
}
// scatter Ah into C_to_A
int64_t kA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kA = 0 ; kA < Anvec ; kA++)
{
int64_t jA = GB_Ah (kA) ;
C_to_A [jA] = kA ;
}
}
else if (!(A_is_hyper || A_is_slice) && B_is_hyper)
{
//----------------------------------------------------------------------
// A is standard, B is hypersparse
//----------------------------------------------------------------------
// C will be standard. Construct the C_to_B mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, &C_to_B))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < n ; j++)
{
C_to_B [j] = -1 ;
}
// scatter Bh into C_to_B
int64_t kB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kB = 0 ; kB < Bnvec ; kB++)
{
int64_t jB = Bh [kB] ;
C_to_B [jB] = kB ;
}
}
else
{
//----------------------------------------------------------------------
// A and B are both standard
//----------------------------------------------------------------------
// C will be standard
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, NULL))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// construct C_to_M if needed
//--------------------------------------------------------------------------
if (C_to_M != NULL)
{
if (Ch != NULL)
{
// C is hypersparse
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
// C_to_M [k] = kM if Mh [kM] == j and M(:,j) is non-empty
int64_t kM = 0, pM, pM_end ;
GB_lookup (true, Mh, Mp, &kM, Mnvec-1, j, &pM, &pM_end) ;
C_to_M [k] = (pM < pM_end) ? kM : -1 ;
}
}
else
{
// C is standard
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < n ; j++)
{
C_to_M [j] = -1 ;
}
// scatter Mh into C_to_M
int64_t kM ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kM = 0 ; kM < Mnvec ; kM++)
{
int64_t jM = Mh [kM] ;
C_to_M [jM] = kM ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
(*p_Cnvec ) = Cnvec ;
if (p_Ch_is_Mh != NULL)
{
// return Ch_is_Mh to GB_add. For GB_masker, Ch is never Mh.
(*p_Ch_is_Mh) = Ch_is_Mh ;
}
(*Ch_handle ) = Ch ;
(*C_to_A_handle) = C_to_A ;
(*C_to_B_handle) = C_to_B ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = C_to_M ;
}
//--------------------------------------------------------------------------
// The code below describes what the output contains:
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
ASSERT (A != NULL) ; // A and B are always present
ASSERT (B != NULL) ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < Cnvec ; k++)
{
// C(:,j) is in the list, as the kth vector
int64_t j ;
if (Ch == NULL)
{
// C will be constructed as standard sparse
j = k ;
}
else
{
// C will be constructed as hypersparse
j = Ch [k] ;
}
// vectors j in Ch are sorted, and in the range 0:n-1
ASSERT (j >= 0 && j < n) ;
ASSERT (j > jlast) ;
jlast = j ;
// see if A (:,j) exists
if (C_to_A != NULL)
{
// A is hypersparse, or a slice
ASSERT (A->is_hyper || A->is_slice) ;
int64_t kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
int64_t jA = GB_Ah (kA) ;
ASSERT (j == jA) ;
}
}
else
{
// A is in standard sparse form
// C_to_A exists only if A is hypersparse
ASSERT (!(A->is_hyper || A->is_slice)) ;
}
// see if B (:,j) exists
if (C_to_B != NULL)
{
// B is hypersparse
ASSERT (B->is_hyper) ;
int64_t kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
int64_t jB = B->h [kB] ;
ASSERT (j == jB) ;
}
}
else
{
// B is in standard sparse form
// C_to_B exists only if B is hypersparse
ASSERT (!B->is_hyper) ;
}
// see if M (:,j) exists
if (Ch_is_Mh)
{
// Ch is the same as Mh
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
ASSERT (C_to_M == NULL) ;
}
else if (C_to_M != NULL)
{
// M is present and hypersparse
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
int64_t kM = C_to_M [k] ;
ASSERT (kM >= -1 && kM < M->nvec) ;
if (kM >= 0)
{
int64_t jM = M->h [kM] ;
ASSERT (j == jM) ;
}
}
else
{
// M is not present, or in standard form
ASSERT (M == NULL || !(M->is_hyper)) ;
}
}
#endif
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
}
|
GB_binop__lxor_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint64)
// A*D function (colscale): GB (_AxD__lxor_uint64)
// D*A function (rowscale): GB (_DxB__lxor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint64)
// C=scalar+B GB (_bind1st__lxor_uint64)
// C=scalar+B' GB (_bind1st_tran__lxor_uint64)
// C=A+scalar GB (_bind2nd__lxor_uint64)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
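// e.g., x = 5, y = 0 gives z = 1, while x = 5, y = 2 gives z = 0: a logical
// (not bitwise) exclusive or.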
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT64 || GxB_NO_LXOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
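// The LXOR operator on an integer type: each operand is first normalized
// to a boolean (v != 0), and inequality of the two booleans is then
// exactly the logical xor, as computed in the loop below.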
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp_hello.c | /*
Hello, World program for OpenMP.
Jim Teresco, Siena College, Fall 2021
Based on example by Blaise Barney, LLNL at
https://hpc.llnl.gov/tuts/openMP/#ProgrammingModel
*/
#include <omp.h>
#include <stdio.h>
int main (int argc, char *argv[]) {
// These variables will be used to store the number of threads and
// each thread's ID when we enter a parallel code segment.
int num_threads, thread_num;
printf("Hello, World! from the start of the main function\n");
// This compiler directive says that we want to create a team
// of threads to run the subsequent code block in parallel.
//
// Each thread will get its own copy of each of the variables listed
// in the private clause.
#pragma omp parallel private(num_threads, thread_num)
{
// Get our thread number -- will be 0 to num_threads - 1.
thread_num = omp_get_thread_num();
printf("Hello, World! from thread %d\n", thread_num);
// We'll have only thread 0 print the number of threads.
if (thread_num == 0) {
num_threads = omp_get_num_threads();
printf("Number of threads = %d\n", num_threads);
}
}
// At this point, the team of threads joins back into the original
// thread, and only that original thread remains in existence.
printf("Goodbye, World! from the original thread\n");
return 0;
}
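/* A typical build-and-run session (assuming gcc; other compilers need
   their own OpenMP flags):
     gcc -fopenmp -o openmp_hello openmp_hello.c
     OMP_NUM_THREADS=4 ./openmp_hello
   One "Hello, World!" line is printed per thread; the ordering of the
   lines varies from run to run because the threads execute concurrently. */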
|
trmv_x_csc_u_hi.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
static alphasparse_status_t
trmv_csc_u_hi_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
ALPHA_INT partition[thread_num + 1];
balanced_partition_row_by_nnz(A->cols_end, n, thread_num, partition);
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
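/* Thread t owns the column range [partition[t], partition[t+1]),
 * balanced by nonzero count. Because different columns can scatter
 * into the same rows of y, each thread accumulates into its own
 * length-m scratch vector tmp[t]; the partial vectors are reduced
 * into y after the parallel region. (A production version would also
 * check these mallocs for NULL.) */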
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
{
const ALPHA_INT tid = alpha_get_thread_id();
const ALPHA_INT local_n_s = partition[tid];
const ALPHA_INT local_n_e = partition[tid + 1];
tmp[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number) * m);
for(ALPHA_INT j = 0; j < m; ++j) {
alpha_setzero(tmp[tid][j]);
}
for(ALPHA_INT i = local_n_s; i < local_n_e; ++i)
{
const ALPHA_Number x_r = x[i];
register ALPHA_Number tmp_t;
alpha_setzero(tmp_t);
ALPHA_INT cs = A->cols_start[i];
ALPHA_INT ce = A->cols_end[i];
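/* Unrolled by 4. The row_k < i guards keep only entries in the
 * strict upper triangle (row < column); the unit diagonal is applied
 * later as alpha*x[i] in the reduction loop. */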
for(; cs < ce-3; cs += 4)
{
const ALPHA_INT row_0 = A->row_indx[cs];
const ALPHA_INT row_1 = A->row_indx[cs+1];
const ALPHA_INT row_2 = A->row_indx[cs+2];
const ALPHA_INT row_3 = A->row_indx[cs+3];
if(row_3 < i)
{
alpha_mul(tmp_t, A->values[cs], x_r);
alpha_madde(tmp[tid][row_0], alpha, tmp_t);
alpha_mul(tmp_t, A->values[cs+1], x_r);
alpha_madde(tmp[tid][row_1], alpha, tmp_t);
alpha_mul(tmp_t, A->values[cs+2], x_r);
alpha_madde(tmp[tid][row_2], alpha, tmp_t);
alpha_mul(tmp_t, A->values[cs+3], x_r);
alpha_madde(tmp[tid][row_3], alpha, tmp_t);
}else if (row_2 < i){
alpha_mul(tmp_t, A->values[cs], x_r);
alpha_madde(tmp[tid][row_0], alpha, tmp_t);
alpha_mul(tmp_t, A->values[cs+1], x_r);
alpha_madde(tmp[tid][row_1], alpha, tmp_t);
alpha_mul(tmp_t, A->values[cs+2], x_r);
alpha_madde(tmp[tid][row_2], alpha, tmp_t);
}else if (row_1 < i){
alpha_mul(tmp_t, A->values[cs], x_r);
alpha_madde(tmp[tid][row_0], alpha, tmp_t);
alpha_mul(tmp_t, A->values[cs+1], x_r);
alpha_madde(tmp[tid][row_1], alpha, tmp_t);
}else if (row_0 < i){
alpha_mul(tmp_t, A->values[cs], x_r);
alpha_madde(tmp[tid][row_0], alpha, tmp_t);
}
}
for (;cs < ce;++cs)
{
const ALPHA_INT row = A->row_indx[cs];
if (row < i){
alpha_mul(tmp_t, A->values[cs], x_r);
alpha_madde(tmp[tid][row], alpha, tmp_t);
}
}
}
}
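/* Reduce the per-thread partials into y:
 * y[i] = sum_t tmp[t][i] + alpha*x[i] (unit diagonal) + beta*y[i]. */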
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
ALPHA_Number tmp_y;
alpha_setzero(tmp_y);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(tmp_y, tmp_y, tmp[j][i]);
}
alpha_madde(tmp_y, alpha, x[i]);
alpha_madde(tmp_y, y[i], beta);
y[i] = tmp_y;
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < thread_num; ++i)
{
free(tmp[i]);
}
free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return trmv_csc_u_hi_omp(alpha, A, x, beta, y);
}
|
tally.h | #ifndef OPENMC_TALLIES_TALLY_H
#define OPENMC_TALLIES_TALLY_H
#include "openmc/constants.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/trigger.h"
#include <gsl/gsl>
#include "pugixml.hpp"
#include "xtensor/xfixed.hpp"
#include "xtensor/xtensor.hpp"
#include <memory> // for unique_ptr
#include <unordered_map>
#include <string>
#include <vector>
namespace openmc {
//==============================================================================
//! A user-specified flux-weighted (or current) measurement.
//==============================================================================
class Tally {
public:
//----------------------------------------------------------------------------
// Constructors, destructors, factory functions
explicit Tally(int32_t id);
explicit Tally(pugi::xml_node node);
~Tally();
static Tally* create(int32_t id = -1);
//----------------------------------------------------------------------------
// Accessors
void set_id(int32_t id);
void set_active(bool active) { active_ = active; }
void set_writable(bool writable) { writable_ = writable; }
void set_scores(pugi::xml_node node);
void set_scores(const std::vector<std::string>& scores);
void set_nuclides(pugi::xml_node node);
void set_nuclides(const std::vector<std::string>& nuclides);
const std::vector<int32_t>& filters() const {return filters_;}
int32_t filters(int i) const {return filters_[i];}
void set_filters(gsl::span<Filter*> filters);
int32_t strides(int i) const {return strides_[i];}
int32_t n_filter_bins() const {return n_filter_bins_;}
bool writable() const { return writable_;}
//----------------------------------------------------------------------------
// Other methods.
void add_filter(Filter* filter) { set_filters({&filter, 1}); }
void init_triggers(pugi::xml_node node);
void init_results();
void reset();
void accumulate();
//----------------------------------------------------------------------------
// Major public data members.
int id_ {C_NONE}; //!< User-defined identifier
std::string name_; //!< User-defined name
int type_ {TALLY_VOLUME}; //!< e.g. volume, surface current
//! Event type that contributes to this tally
int estimator_ {ESTIMATOR_TRACKLENGTH};
//! Whether this tally is currently being updated
bool active_ {false};
//! Number of realizations
int n_realizations_ {0};
std::vector<int> scores_; //!< Filter integrands (e.g. flux, fission)
//! Index of each nuclide to be tallied. -1 indicates total material.
std::vector<int> nuclides_ {-1};
//! True if this tally has a bin for every nuclide in the problem
bool all_nuclides_ {false};
//! Results for each bin -- the first dimension of the array is for scores
//! (e.g. flux, total reaction rate, fission reaction rate, etc.) and the
//! second dimension of the array is for the combination of filters
//! (e.g. specific cell, specific energy group, etc.)
xt::xtensor<double, 3> results_;
//! True if this tally should be written to statepoint files
bool writable_ {true};
//----------------------------------------------------------------------------
// Miscellaneous public members.
// We need to have quick access to some filters. The following gives indices
// for various filters that could be in the tally or C_NONE if they are not
// present.
int energyout_filter_ {C_NONE};
int delayedgroup_filter_ {C_NONE};
bool depletion_rx_ {false}; //!< Has depletion reactions (e.g. (n,2n))
std::vector<Trigger> triggers_;
int deriv_ {C_NONE}; //!< Index of a TallyDerivative object for diff tallies.
private:
//----------------------------------------------------------------------------
// Private data.
std::vector<int32_t> filters_; //!< Filter indices in global filters array
//! Index strides assigned to each filter to support 1D indexing.
std::vector<int32_t> strides_;
int32_t n_filter_bins_ {0};
gsl::index index_;
};
//==============================================================================
// Global variable declarations
//==============================================================================
namespace model {
extern std::vector<std::unique_ptr<Tally>> tallies;
extern std::vector<int> active_tallies;
extern std::vector<int> active_analog_tallies;
extern std::vector<int> active_tracklength_tallies;
extern std::vector<int> active_collision_tallies;
extern std::vector<int> active_meshsurf_tallies;
extern std::vector<int> active_surface_tallies;
extern std::vector<int> active_surface_track;
extern std::unordered_map<int, int> tally_map;
}
namespace simulation {
//! Global tallies (such as k-effective estimators)
extern xt::xtensor_fixed<double, xt::xshape<N_GLOBAL_TALLIES, 3>> global_tallies;
//! Number of realizations for global tallies
extern "C" int32_t n_realizations;
}
// It is possible to protect accumulate operations on global tallies by using an
// atomic update. However, when multiple threads accumulate to the same global
// tally, it can cause a higher cache miss rate due to invalidation. Thus, we
// use threadprivate variables to accumulate global tallies and then reduce at
// the end of a generation.
extern double global_tally_absorption;
extern double global_tally_collision;
extern double global_tally_tracklength;
extern double global_tally_leakage;
#pragma omp threadprivate(global_tally_absorption, global_tally_collision, \
global_tally_tracklength, global_tally_leakage)
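// A minimal sketch of that reduction (illustrative only -- the names
// below are not the actual OpenMC routines):
//
//   double total = 0.0;
//   #pragma omp parallel reduction(+: total)
//   { total += global_tally_absorption; }
//
// Each thread contributes its threadprivate copy once per generation,
// instead of issuing one atomic update per event.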
//==============================================================================
// Non-member functions
//==============================================================================
//! Read tally specification from tallies.xml
void read_tallies_xml();
//! \brief Accumulate the sum of the contributions from each history within the
//! batch to a new random variable
void accumulate_tallies();
//! Determine which tallies should be active
void setup_active_tallies();
// Alias for the type returned by xt::adapt(...). N is the dimension of the
// multidimensional array
template <std::size_t N>
using adaptor_type = xt::xtensor_adaptor<xt::xbuffer_adaptor<double*&, xt::no_ownership>, N>;
#ifdef OPENMC_MPI
//! Collect all tally results onto master process
void reduce_tally_results();
#endif
void free_memory_tally();
} // namespace openmc
#endif // OPENMC_TALLIES_TALLY_H
|
field.c | /*****************************************************************************
*
* field.c
*
* Rank 1 objects: scalar fields, vector fields, and compressed tensor Q_ab.
*
* The data storage order is determined in memory.h.
*
* Lees-Edwards transformation is supported provided the lees_edw_t
* object is supplied at initialisation time. Otherwise, the normal
* cs_t coordinate system applies.
*
*
* Edinburgh Soft Matter and Statistical Physics Group and
* Edinburgh Parallel Computing Centre
*
* (c) 2012-2022 The University of Edinburgh
*
* Contributing authors:
* Kevin Stratford (kevin@epcc.ed.ac.uk)
* Alan Gray (alang@epcc.ed.ac.uk)
*
*****************************************************************************/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pe.h"
#include "coords.h"
#include "leesedwards.h"
#include "io_harness.h"
#include "timer.h"
#include "util.h"
#include "field.h"
static int field_write(FILE * fp, int index, void * self);
static int field_write_ascii(FILE * fp, int index, void * self);
static int field_read(FILE * fp, int index, void * self);
static int field_read_ascii(FILE * fp, int index, void * self);
static int field_leesedwards_parallel(field_t * obj);
__host__ int field_init(field_t * obj, int nhcomm, lees_edw_t * le);
/*****************************************************************************
*
* field_create
*
* le_t * le may be NULL if no Lees Edwards planes are present.
*
* This just sets the type of field, often an order parameter, e.g.:
* nf = 1 for scalar "phi"
* nf = 3 for vector "p"
* nf = 5 for tensor "q" (compressed traceless, symmetric)
*
*****************************************************************************/
__host__ int field_create(pe_t * pe, cs_t * cs, lees_edw_t * le,
const char * name,
const field_options_t * opts,
field_t ** pobj) {
field_t * obj = NULL;
assert(pe);
assert(cs);
assert(name);
assert(opts);
assert(pobj);
if (field_options_valid(opts) == 0) {
pe_fatal(pe, "Internal error: invalid field options\n");
}
obj = (field_t *) calloc(1, sizeof(field_t));
assert(obj);
if (obj == NULL) pe_fatal(pe, "calloc(obj) failed\n");
obj->nf = opts->ndata;
obj->name = (char *) calloc(strlen(name) + 1, sizeof(char));
assert(obj->name);
if (obj->name == NULL) pe_fatal(pe, "calloc(name) failed\n");
strncpy(obj->name, name, imin(strlen(name), BUFSIZ));
obj->name[strlen(name)] = '\0';
obj->pe = pe;
obj->cs = cs;
pe_retain(pe);
cs_retain(cs);
field_init(obj, opts->nhcomm, le);
field_halo_create(obj, &obj->h);
obj->opts = *opts;
if (obj->opts.haloverbose) field_halo_info(obj);
*pobj = obj;
return 0;
}
/*****************************************************************************
*
* field_free
*
*****************************************************************************/
__host__ int field_free(field_t * obj) {
int ndevice;
double * tmp;
assert(obj);
tdpGetDeviceCount(&ndevice);
if (ndevice > 0) {
tdpMemcpy(&tmp, &obj->target->data, sizeof(double *),
tdpMemcpyDeviceToHost);
tdpFree(tmp);
tdpFree(obj->target);
}
if (obj->data) free(obj->data);
if (obj->name) free(obj->name);
if (obj->halo) halo_swap_free(obj->halo);
if (obj->info) io_info_free(obj->info);
field_halo_free(&obj->h);
cs_free(obj->cs);
pe_free(obj->pe);
free(obj);
return 0;
}
/*****************************************************************************
*
* field_init
*
* Initialise the lattice data, MPI halo information.
*
* The le_t may be NULL, in which case create an instance with
* no planes.
*
* TODO:
* The behaviour with no planes (cs_t only) could be refactored
* into two separate classes.
*
*****************************************************************************/
__host__ int field_init(field_t * obj, int nhcomm, lees_edw_t * le) {
int ndevice;
int nsites;
size_t nfsz;
double * tmp;
assert(obj);
assert(obj->data == NULL);
cs_nsites(obj->cs, &nsites);
if (le) lees_edw_nsites(le, &nsites);
obj->le = le;
obj->nhcomm = nhcomm;
obj->nsites = nsites;
nfsz = (size_t) obj->nf*nsites;
#ifndef OLD_DATA
obj->data = (double *) calloc(nfsz, sizeof(double));
if (obj->data == NULL) pe_fatal(obj->pe, "calloc(obj->data) failed\n");
#else
obj->data = (double *) mem_aligned_malloc(MEM_PAGESIZE, nfsz*sizeof(double));
if (obj->data == NULL) pe_fatal(obj->pe, "calloc(obj->data) failed\n");
#endif
/* Allocate target copy of structure (or alias) */
tdpGetDeviceCount(&ndevice);
if (ndevice == 0) {
obj->target = obj;
}
else {
cs_t * cstarget = NULL;
lees_edw_t * letarget = NULL;
tdpMalloc((void **) &obj->target, sizeof(field_t));
tdpMalloc((void **) &tmp, nfsz*sizeof(double));
tdpMemcpy(&obj->target->data, &tmp, sizeof(double *),
tdpMemcpyHostToDevice);
cs_target(obj->cs, &cstarget);
if (le) lees_edw_target(obj->le, &letarget);
tdpMemcpy(&obj->target->cs, &cstarget, sizeof(cs_t *),
tdpMemcpyHostToDevice);
tdpMemcpy(&obj->target->le, &letarget, sizeof(lees_edw_t *),
tdpMemcpyHostToDevice);
field_memcpy(obj, tdpMemcpyHostToDevice);
}
/* MPI datatypes for halo */
halo_swap_create_r1(obj->pe, obj->cs, nhcomm, nsites, obj->nf, &obj->halo);
assert(obj->halo);
halo_swap_handlers_set(obj->halo, halo_swap_pack_rank1, halo_swap_unpack_rank1);
return 0;
}
/*****************************************************************************
*
* field_memcpy
*
*****************************************************************************/
__host__ int field_memcpy(field_t * obj, tdpMemcpyKind flag) {
int ndevice;
size_t nfsz;
double * tmp;
tdpGetDeviceCount(&ndevice);
if (ndevice == 0) {
/* Ensure we alias */
assert(obj->target == obj);
}
else {
nfsz = (size_t) obj->nf*obj->nsites;
tdpMemcpy(&tmp, &obj->target->data, sizeof(double *),
tdpMemcpyDeviceToHost);
switch (flag) {
case tdpMemcpyHostToDevice:
tdpMemcpy(&obj->target->nf, &obj->nf, sizeof(int), flag);
tdpMemcpy(&obj->target->nhcomm, &obj->nhcomm, sizeof(int), flag);
tdpMemcpy(&obj->target->nsites, &obj->nsites, sizeof(int), flag);
tdpMemcpy(tmp, obj->data, nfsz*sizeof(double), flag);
break;
case tdpMemcpyDeviceToHost:
tdpMemcpy(obj->data, tmp, nfsz*sizeof(double), flag);
break;
default:
pe_fatal(obj->pe, "Bad flag in field_memcpy\n");
break;
}
}
return 0;
}
/*****************************************************************************
*
* field_nf
*
*****************************************************************************/
__host__ __device__ int field_nf(field_t * obj, int * nf) {
assert(obj);
assert(nf);
*nf = obj->nf;
return 0;
}
/*****************************************************************************
*
* field_init_io_info
*
*****************************************************************************/
__host__ int field_init_io_info(field_t * obj, int grid[3], int form_in,
int form_out) {
io_info_args_t args = io_info_args_default();
assert(obj);
assert(obj->info == NULL);
args.grid[X] = grid[X];
args.grid[Y] = grid[Y];
args.grid[Z] = grid[Z];
io_info_create(obj->pe, obj->cs, &args, &obj->info);
if (obj->info == NULL) pe_fatal(obj->pe, "io_info_create(field) failed\n");
io_info_set_name(obj->info, obj->name);
io_info_write_set(obj->info, IO_FORMAT_BINARY, field_write);
io_info_write_set(obj->info, IO_FORMAT_ASCII, field_write_ascii);
io_info_read_set(obj->info, IO_FORMAT_BINARY, field_read);
io_info_read_set(obj->info, IO_FORMAT_ASCII, field_read_ascii);
/* ASCII format size is 23 bytes per element plus a '\n' */
io_info_set_bytesize(obj->info, IO_FORMAT_BINARY, obj->nf*sizeof(double));
io_info_set_bytesize(obj->info, IO_FORMAT_ASCII, (obj->nf*23 + 1));
io_info_format_set(obj->info, form_in, form_out);
io_info_metadata_filestub_set(obj->info, obj->name);
return 0;
}
/*****************************************************************************
*
* field_io_info
*
*****************************************************************************/
__host__ int field_io_info(field_t * obj, io_info_t ** info) {
assert(obj);
assert(obj->info);
assert(info);
*info = obj->info;
return 0;
}
/*****************************************************************************
*
* field_halo
*
*****************************************************************************/
__host__ int field_halo(field_t * obj) {
int nlocal[3];
assert(obj);
cs_nlocal(obj->cs, nlocal);
if (nlocal[Z] < obj->nhcomm) {
/* This constraint means we can't use the target method;
* it also requires a copy if the address spaces are distinct. */
field_memcpy(obj, tdpMemcpyDeviceToHost);
field_halo_swap(obj, FIELD_HALO_HOST);
field_memcpy(obj, tdpMemcpyHostToDevice);
}
else {
/* Default to ... */
field_halo_swap(obj, obj->opts.haloscheme);
}
return 0;
}
/*****************************************************************************
*
* field_halo_swap
*
*****************************************************************************/
__host__ int field_halo_swap(field_t * obj, field_halo_enum_t flag) {
double * data;
assert(obj);
switch (flag) {
case FIELD_HALO_HOST:
halo_swap_host_rank1(obj->halo, obj->data, MPI_DOUBLE);
break;
case FIELD_HALO_TARGET:
tdpMemcpy(&data, &obj->target->data, sizeof(double *),
tdpMemcpyDeviceToHost);
halo_swap_packed(obj->halo, data);
break;
case FIELD_HALO_OPENMP:
field_halo_post(obj, &obj->h);
field_halo_wait(obj, &obj->h);
break;
default:
assert(0);
}
return 0;
}
/*****************************************************************************
*
* field_leesedwards
*
* Interpolate the phi field to take account of any local Lees Edwards
* boundaries.
*
* The buffer region of obj->data[] is updated with the interpolated
* values.
*
*****************************************************************************/
__host__ int field_leesedwards(field_t * obj) {
int nf;
int nhalo;
int nlocal[3]; /* Local system size */
int nxbuffer; /* Number of buffer planes */
int ib; /* Index in buffer region */
int ib0; /* buffer region offset */
int ic; /* Index corresponding x location in real system */
int jc, kc, n;
int index, index0, index1, index2, index3;
int mpi_cartsz[3];
double dy; /* Displacement for current ic->ib pair */
double fr; /* Fractional displacement */
double ltot[3];
const double r6 = (1.0/6.0);
int jdy; /* Integral part of displacement */
int j0, j1, j2, j3; /* j values in real system to interpolate between */
assert(obj);
assert(obj->data);
if (obj->le == NULL) return 0;
cs_ltot(obj->cs, ltot);
cs_cartsz(obj->cs, mpi_cartsz);
/* At the moment we require some copies for device version ... */
/* ... here and at the end. */
{
int nplane = lees_edw_nplane_total(obj->le);
if (nplane > 0) field_memcpy(obj, tdpMemcpyDeviceToHost);
}
if (mpi_cartsz[Y] > 1) {
/* This has its own routine. */
field_leesedwards_parallel(obj);
}
else {
/* No messages are required... */
nf = obj->nf;
cs_nhalo(obj->cs, &nhalo);
cs_nlocal(obj->cs, nlocal);
lees_edw_nxbuffer(obj->le, &nxbuffer);
ib0 = nlocal[X] + nhalo + 1;
for (ib = 0; ib < nxbuffer; ib++) {
ic = lees_edw_ibuff_to_real(obj->le, ib);
lees_edw_buffer_dy(obj->le, ib, 0.0, &dy);
dy = fmod(dy, ltot[Y]);
jdy = floor(dy);
fr = 1.0 - (dy - jdy);
for (jc = 1 - nhalo; jc <= nlocal[Y] + nhalo; jc++) {
/* Note that a linear interpolation here would involve
* (1.0 - fr)*phi(ic,j1,kc) + fr*phi(ic,j2,kc)
* This is just Lagrange four-point instead. */
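/* The weights applied below are the cubic Lagrange coefficients
 * for fractional displacement fr:
 *   w0 = -fr(fr-1)(fr-2)/6,   w1 = (fr^2-1)(fr-2)/2,
 *   w2 = -fr(fr+1)(fr-2)/2,   w3 =  fr(fr^2-1)/6. */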
j0 = 1 + (jc - jdy - 3 + 2*nlocal[Y]) % nlocal[Y];
j1 = 1 + j0 % nlocal[Y];
j2 = 1 + j1 % nlocal[Y];
j3 = 1 + j2 % nlocal[Y];
for (kc = 1 - nhalo; kc <= nlocal[Z] + nhalo; kc++) {
index = lees_edw_index(obj->le, ib0 + ib, jc, kc);
index0 = lees_edw_index(obj->le, ic, j0, kc);
index1 = lees_edw_index(obj->le, ic, j1, kc);
index2 = lees_edw_index(obj->le, ic, j2, kc);
index3 = lees_edw_index(obj->le, ic, j3, kc);
for (n = 0; n < nf; n++) {
obj->data[addr_rank1(obj->nsites, nf, index, n)] =
- r6*fr*(fr-1.0)*(fr-2.0)*obj->data[addr_rank1(obj->nsites, nf, index0, n)]
+ 0.5*(fr*fr-1.0)*(fr-2.0)*obj->data[addr_rank1(obj->nsites, nf, index1, n)]
- 0.5*fr*(fr+1.0)*(fr-2.0)*obj->data[addr_rank1(obj->nsites, nf, index2, n)]
+ r6*fr*(fr*fr-1.0)*obj->data[addr_rank1(obj->nsites, nf, index3, n)];
}
}
}
}
}
{
int nplane = lees_edw_nplane_total(obj->le);
if (nplane > 0) field_memcpy(obj, tdpMemcpyHostToDevice);
}
return 0;
}
/*****************************************************************************
*
* field_leesedwards_parallel
*
* The Lees Edwards transformation requires a certain amount of
* communication in parallel.
*
* As we are using a 4-point interpolation, there is a requirement
* to communicate with as many as three different processors to
* send/receive appropriate interpolated values.
*
* Note that the sends only involve the 'real' system, so there is
* no requirement that the halos be up-to-date (although it is
* expected that they will be for the gradient calculation).
*
*****************************************************************************/
static int field_leesedwards_parallel(field_t * obj) {
int nf;
int nlocal[3]; /* Local system size */
int noffset[3]; /* Local starting offset */
int nxbuffer; /* Number of buffer planes */
int ib; /* Index in buffer region */
int ib0; /* buffer region offset */
int ic; /* Index corresponding x location in real system */
int jc, kc;
int j0, j1, j2, j3;
int n, n1, n2, n3;
int nhalo;
int jdy; /* Integral part of displacement */
int index;
int ntotal[3];
double dy; /* Displacement for current ic->ib pair */
double fr; /* Fractional displacement */
double ltot[3];
const double r6 = (1.0/6.0);
int nsend; /* Send buffer size */
int nrecv; /* Recv buffer size */
int nrank_s[3]; /* send ranks */
int nrank_r[3]; /* recv ranks */
double * sendbuf = NULL; /* Send buffer */
double * recvbuf = NULL; /* Interpolation buffer */
const int tag0 = 1256;
const int tag1 = 1257;
const int tag2 = 1258;
MPI_Comm le_comm;
MPI_Request request[6];
MPI_Status status[3];
assert(obj);
assert(obj->le);
field_nf(obj, &nf);
cs_ltot(obj->cs, ltot);
cs_nhalo(obj->cs, &nhalo);
cs_nlocal(obj->cs, nlocal);
cs_ntotal(obj->cs, ntotal);
cs_nlocal_offset(obj->cs, noffset);
ib0 = nlocal[X] + nhalo + 1;
lees_edw_comm(obj->le, &le_comm);
lees_edw_nxbuffer(obj->le, &nxbuffer);
/* Allocate buffer space */
nsend = nf*nlocal[Y]*(nlocal[Z] + 2*nhalo);
nrecv = nf*(nlocal[Y] + 2*nhalo + 3)*(nlocal[Z] + 2*nhalo);
sendbuf = (double *) malloc(nsend*sizeof(double));
recvbuf = (double *) malloc(nrecv*sizeof(double));
if (sendbuf == NULL) pe_fatal(obj->pe, "malloc(sendbuf) failed\n");
if (recvbuf == NULL) pe_fatal(obj->pe, "malloc(recvbuf) failed\n");
/* One round of communication for each buffer plane */
for (ib = 0; ib < nxbuffer; ib++) {
ic = lees_edw_ibuff_to_real(obj->le, ib);
/* Work out the displacement-dependent quantities */
lees_edw_buffer_dy(obj->le, ib, 0.0, &dy);
dy = fmod(dy, ltot[Y]);
jdy = floor(dy);
fr = 1.0 - (dy - jdy);
/* In the real system the first point we require is
* j1 = jc - jdy - 3
* with jc = noffset[Y] + 1 - nhalo in the global coordinates.
* Modular arithmetic ensures 1 <= j1 <= ntotal[Y] */
jc = noffset[Y] + 1 - nhalo;
j1 = 1 + (jc - jdy - 3 + 2*ntotal[Y]) % ntotal[Y];
assert(j1 >= 1);
assert(j1 <= ntotal[Y]);
lees_edw_jstart_to_mpi_ranks(obj->le, j1, nrank_s, nrank_r);
/* Local quantities: j2 is the position of j1 in local coordinates.
* The three sections to send/receive are organised as follows:
* jc is the number of j points in each case, while n is the
* total number of data items. Note that n3 can be zero. */
j2 = 1 + (j1 - 1) % nlocal[Y];
assert(j2 >= 1);
assert(j2 <= nlocal[Y]);
jc = nlocal[Y] - j2 + 1;
n1 = nf*jc*(nlocal[Z] + 2*nhalo);
MPI_Irecv(recvbuf, n1, MPI_DOUBLE, nrank_r[0], tag0, le_comm, request);
jc = imin(nlocal[Y], j2 + 2*nhalo + 2);
n2 = nf*jc*(nlocal[Z] + 2*nhalo);
MPI_Irecv(recvbuf + n1, n2, MPI_DOUBLE, nrank_r[1], tag1, le_comm,
request + 1);
jc = imax(0, j2 - nlocal[Y] + 2*nhalo + 2);
n3 = nf*jc*(nlocal[Z] + 2*nhalo);
MPI_Irecv(recvbuf + n1 + n2, n3, MPI_DOUBLE, nrank_r[2], tag2, le_comm,
request + 2);
/* Load contiguous send buffer */
for (jc = 1; jc <= nlocal[Y]; jc++) {
for (kc = 1 - nhalo; kc <= nlocal[Z] + nhalo; kc++) {
index = lees_edw_index(obj->le, ic, jc, kc);
for (n = 0; n < nf; n++) {
j0 = nf*(jc - 1)*(nlocal[Z] + 2*nhalo);
j1 = j0 + nf*(kc + nhalo - 1);
assert((j1+n) >= 0 && (j1+n) < nsend);
sendbuf[j1+n] = obj->data[addr_rank1(obj->nsites, nf, index, n)];
}
}
}
/* Post sends and wait for receives. */
index = nf*(j2 - 1)*(nlocal[Z] + 2*nhalo);
MPI_Issend(sendbuf+index, n1, MPI_DOUBLE, nrank_s[0], tag0, le_comm,
request + 3);
MPI_Issend(sendbuf, n2, MPI_DOUBLE, nrank_s[1], tag1, le_comm,
request + 4);
MPI_Issend(sendbuf, n3, MPI_DOUBLE, nrank_s[2], tag2, le_comm,
request + 5);
MPI_Waitall(3, request, status);
/* Perform the actual interpolation from temporary buffer. */
for (jc = 1 - nhalo; jc <= nlocal[Y] + nhalo; jc++) {
/* Note that the linear interpolation here would be
* (1.0-fr)*buffer(j1, k, n) + fr*buffer(j2, k, n)
* This is again Lagrange four point. */
j0 = (jc + nhalo - 1 )*(nlocal[Z] + 2*nhalo);
j1 = (jc + nhalo - 1 + 1)*(nlocal[Z] + 2*nhalo);
j2 = (jc + nhalo - 1 + 2)*(nlocal[Z] + 2*nhalo);
j3 = (jc + nhalo - 1 + 3)*(nlocal[Z] + 2*nhalo);
for (kc = 1 - nhalo; kc <= nlocal[Z] + nhalo; kc++) {
index = lees_edw_index(obj->le, ib0 + ib, jc, kc);
for (n = 0; n < nf; n++) {
obj->data[addr_rank1(obj->nsites, nf, index, n)] =
- r6*fr*(fr-1.0)*(fr-2.0)*recvbuf[nf*(j0 + kc+nhalo-1) + n]
+ 0.5*(fr*fr-1.0)*(fr-2.0)*recvbuf[nf*(j1 + kc+nhalo-1) + n]
- 0.5*fr*(fr+1.0)*(fr-2.0)*recvbuf[nf*(j2 + kc+nhalo-1) + n]
+ r6*fr*(fr*fr-1.0)*recvbuf[nf*(j3 + kc+nhalo-1) + n];
}
}
}
/* Clean up the sends, and move to next buffer location. */
MPI_Waitall(3, request + 3, status);
}
free(recvbuf);
free(sendbuf);
return 0;
}
/*****************************************************************************
*
* field_scalar
*
*****************************************************************************/
__host__ __device__
int field_scalar(field_t * obj, int index, double * phi) {
assert(obj);
assert(obj->nf == 1);
assert(obj->data);
assert(phi);
*phi = obj->data[addr_rank1(obj->nsites, 1, index, 0)];
return 0;
}
/*****************************************************************************
*
* field_scalar_set
*
*****************************************************************************/
__host__ __device__
int field_scalar_set(field_t * obj, int index, double phi) {
assert(obj);
assert(obj->nf == 1);
assert(obj->data);
obj->data[addr_rank1(obj->nsites, 1, index, 0)] = phi;
return 0;
}
/*****************************************************************************
*
* field_vector
*
*****************************************************************************/
__host__ __device__
int field_vector(field_t * obj, int index, double p[3]) {
int ia;
assert(obj);
assert(obj->nf == 3);
assert(obj->data);
for (ia = 0; ia < 3; ia++) {
p[ia] = obj->data[addr_rank1(obj->nsites, 3, index, ia)];
}
return 0;
}
/*****************************************************************************
*
* field_vector_set
*
*****************************************************************************/
__host__ __device__
int field_vector_set(field_t * obj, int index, const double p[3]) {
int ia;
assert(obj);
assert(obj->nf == 3);
assert(obj->data);
assert(p);
for (ia = 0; ia < 3; ia++) {
obj->data[addr_rank1(obj->nsites, 3, index, ia)] = p[ia];
}
return 0;
}
/*****************************************************************************
*
* field_tensor
*
* The tensor is expanded from the compressed form.
*
*****************************************************************************/
__host__ __device__
int field_tensor(field_t * obj, int index, double q[3][3]) {
assert(obj);
assert(obj->nf == NQAB);
assert(obj->data);
assert(q);
q[X][X] = obj->data[addr_rank1(obj->nsites, NQAB, index, XX)];
q[X][Y] = obj->data[addr_rank1(obj->nsites, NQAB, index, XY)];
q[X][Z] = obj->data[addr_rank1(obj->nsites, NQAB, index, XZ)];
q[Y][X] = q[X][Y];
q[Y][Y] = obj->data[addr_rank1(obj->nsites, NQAB, index, YY)];
q[Y][Z] = obj->data[addr_rank1(obj->nsites, NQAB, index, YZ)];
q[Z][X] = q[X][Z];
q[Z][Y] = q[Y][Z];
q[Z][Z] = 0.0 - q[X][X] - q[Y][Y];
return 0;
}
/*****************************************************************************
*
* field_tensor_set
*
* The tensor supplied should be traceless and symmetric, as it will
* be stored in 'compressed' form.
*
*****************************************************************************/
__host__ __device__
int field_tensor_set(field_t * obj, int index, double q[3][3]) {
assert(obj);
assert(obj->nf == NQAB);
assert(obj->data);
assert(q);
obj->data[addr_rank1(obj->nsites, NQAB, index, XX)] = q[X][X];
obj->data[addr_rank1(obj->nsites, NQAB, index, XY)] = q[X][Y];
obj->data[addr_rank1(obj->nsites, NQAB, index, XZ)] = q[X][Z];
obj->data[addr_rank1(obj->nsites, NQAB, index, YY)] = q[Y][Y];
obj->data[addr_rank1(obj->nsites, NQAB, index, YZ)] = q[Y][Z];
return 0;
}
/*****************************************************************************
*
* field_scalar_array
*
* Return whatever field data there are for this index in a flattened
* 1d array of length obj->nf.
*
* The array must have length at least obj->nf, but there is no check.
*
*****************************************************************************/
__host__ __device__
int field_scalar_array(field_t * obj, int index, double * array) {
int n;
assert(obj);
assert(obj->data);
assert(array);
for (n = 0; n < obj->nf; n++) {
array[n] = obj->data[addr_rank1(obj->nsites, obj->nf, index, n)];
}
return 0;
}
/*****************************************************************************
*
* field_scalar_array_set
*
*****************************************************************************/
__host__ __device__
int field_scalar_array_set(field_t * obj, int index, const double * array) {
int n;
assert(obj);
assert(obj->data);
assert(array);
for (n = 0; n < obj->nf; n++) {
obj->data[addr_rank1(obj->nsites, obj->nf, index, n)] = array[n];
}
return 0;
}
/*****************************************************************************
*
* field_read
*
*****************************************************************************/
static int field_read(FILE * fp, int index, void * self) {
int n;
double array[NQAB]; /* Largest field currently expected */
field_t * obj = (field_t*) self;
assert(fp);
assert(obj);
assert(obj->nf <= NQAB);
n = fread(array, sizeof(double), obj->nf, fp);
if (n != obj->nf) {
pe_fatal(obj->pe, "fread(field) failed at index %d", index);
}
field_scalar_array_set(obj, index, array);
return 0;
}
/*****************************************************************************
*
* field_read_ascii
*
*****************************************************************************/
static int field_read_ascii(FILE * fp, int index, void * self) {
int n, nread;
double array[NQAB]; /* Largest currently expected */
field_t * obj = (field_t*) self;
assert(fp);
assert(obj);
assert(obj->nf <= NQAB);
for (n = 0; n < obj->nf; n++) {
nread = fscanf(fp, "%le", array + n);
if (nread != 1) {
pe_fatal(obj->pe, "fscanf(field) failed at index %d\n", index);
}
}
field_scalar_array_set(obj, index, array);
return 0;
}
/*****************************************************************************
*
* field_write
*
*****************************************************************************/
static int field_write(FILE * fp, int index, void * self) {
int n;
double array[NQAB]; /* Largest currently expected */
field_t * obj = (field_t*) self;
assert(fp);
assert(obj);
assert(obj->nf <= NQAB);
field_scalar_array(obj, index, array);
n = fwrite(array, sizeof(double), obj->nf, fp);
if (n != obj->nf) {
pe_fatal(obj->pe, "fwrite(field) failed at index %d\n", index);
}
return 0;
}
/*****************************************************************************
*
* field_write_ascii
*
*****************************************************************************/
static int field_write_ascii(FILE * fp, int index, void * self) {
int n, nwrite;
double array[NQAB]; /* Largest currently expected */
field_t * obj = (field_t*) self;
assert(fp);
assert(obj);
assert(obj->nf <= NQAB);
field_scalar_array(obj, index, array);
for (n = 0; n < obj->nf; n++) {
nwrite = fprintf(fp, "%22.15e ", array[n]);
if (nwrite != 23) {
pe_fatal(obj->pe, "fprintf(%s) failed at index %d\n", obj->name, index);
}
}
nwrite = fprintf(fp, "\n");
if (nwrite != 1) {
pe_fatal(obj->pe, "fprintf(%s) failed at index %d\n", obj->name, index);
}
return 0;
}
/*****************************************************************************
*
* field_halo_size
*
*****************************************************************************/
int field_halo_size(cs_limits_t lim) {
int szx = 1 + lim.imax - lim.imin;
int szy = 1 + lim.jmax - lim.jmin;
int szz = 1 + lim.kmax - lim.kmin;
return szx*szy*szz;
}
/*****************************************************************************
*
* field_halo_enqueue_send
*
*****************************************************************************/
int field_halo_enqueue_send(const field_t * field, field_halo_t * h, int ireq) {
assert(field);
assert(h);
assert(1 <= ireq && ireq < h->nvel);
int nx = 1 + h->slim[ireq].imax - h->slim[ireq].imin;
int ny = 1 + h->slim[ireq].jmax - h->slim[ireq].jmin;
int nz = 1 + h->slim[ireq].kmax - h->slim[ireq].kmin;
int strz = 1;
int stry = strz*nz;
int strx = stry*ny;
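/* Flatten the 3-d send region to one loop index ih so the iterations
 * can be shared among the threads of the enclosing parallel region;
 * ih decomposes back to (ic,jc,kc) via the strides strx, stry, strz. */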
#pragma omp for nowait
for (int ih = 0; ih < nx*ny*nz; ih++) {
int ic = h->slim[ireq].imin + ih/strx;
int jc = h->slim[ireq].jmin + (ih % strx)/stry;
int kc = h->slim[ireq].kmin + (ih % stry)/strz;
int index = cs_index(field->cs, ic, jc, kc);
for (int ibf = 0; ibf < field->nf; ibf++) {
int faddr = addr_rank1(field->nsites, field->nf, index, ibf);
h->send[ireq][ih*field->nf + ibf] = field->data[faddr];
}
}
return 0;
}
/*****************************************************************************
*
* field_halo_dequeue_recv
*
*****************************************************************************/
int field_halo_dequeue_recv(field_t * field, const field_halo_t * h, int ireq) {
assert(field);
assert(h);
assert(1 <= ireq && ireq < h->nvel);
int nx = 1 + h->rlim[ireq].imax - h->rlim[ireq].imin;
int ny = 1 + h->rlim[ireq].jmax - h->rlim[ireq].jmin;
int nz = 1 + h->rlim[ireq].kmax - h->rlim[ireq].kmin;
int strz = 1;
int stry = strz*nz;
int strx = stry*ny;
double * recv = h->recv[ireq];
/* Check if this is a copy from our own send buffer */
{
int i = 1 + h->cv[h->nvel - ireq][X];
int j = 1 + h->cv[h->nvel - ireq][Y];
int k = 1 + h->cv[h->nvel - ireq][Z];
if (h->nbrrank[i][j][k] == h->nbrrank[1][1][1]) recv = h->send[ireq];
}
#pragma omp for nowait
for (int ih = 0; ih < nx*ny*nz; ih++) {
int ic = h->rlim[ireq].imin + ih/strx;
int jc = h->rlim[ireq].jmin + (ih % strx)/stry;
int kc = h->rlim[ireq].kmin + (ih % stry)/strz;
int index = cs_index(field->cs, ic, jc, kc);
for (int ibf = 0; ibf < field->nf; ibf++) {
int faddr = addr_rank1(field->nsites, field->nf, index, ibf);
field->data[faddr] = recv[ih*field->nf + ibf];
}
}
return 0;
}
/*****************************************************************************
*
* field_halo_create
*
* It's convenient to borrow the velocity notation from the lattice
* Boltzmann (lb) model for the communication directions.
*
*****************************************************************************/
#include "lb_d3q27.h"
int field_halo_create(const field_t * field, field_halo_t * h) {
int nlocal[3] = {0};
int nhalo = 0;
assert(field);
assert(h);
*h = (field_halo_t) {0};
/* Communication model */
cs_cart_comm(field->cs, &h->comm);
{
LB_CV_D3Q27(cv27);
h->nvel = 27;
for (int p = 0; p < h->nvel; p++) {
h->cv[p][X] = cv27[p][X];
h->cv[p][Y] = cv27[p][Y];
h->cv[p][Z] = cv27[p][Z];
}
}
/* Ranks of Cartesian neighbours */
{
int dims[3] = {0};
int periods[3] = {0};
int coords[3] = {0};
MPI_Cart_get(h->comm, 3, dims, periods, coords);
for (int p = 0; p < h->nvel; p++) {
int nbr[3] = {0};
int out[3] = {0}; /* Out-of-range is erroneous for non-periodic dims */
int i = 1 + h->cv[p][X];
int j = 1 + h->cv[p][Y];
int k = 1 + h->cv[p][Z];
nbr[X] = coords[X] + h->cv[p][X];
nbr[Y] = coords[Y] + h->cv[p][Y];
nbr[Z] = coords[Z] + h->cv[p][Z];
out[X] = (!periods[X] && (nbr[X] < 0 || nbr[X] > dims[X] - 1));
out[Y] = (!periods[Y] && (nbr[Y] < 0 || nbr[Y] > dims[Y] - 1));
out[Z] = (!periods[Z] && (nbr[Z] < 0 || nbr[Z] > dims[Z] - 1));
if (out[X] || out[Y] || out[Z]) {
h->nbrrank[i][j][k] = MPI_PROC_NULL;
}
else {
MPI_Cart_rank(h->comm, nbr, &h->nbrrank[i][j][k]);
}
}
/* I must be in the middle */
assert(h->nbrrank[1][1][1] == cs_cart_rank(field->cs));
}
/* Set out limits for send and recv regions. */
cs_nlocal(field->cs, nlocal);
cs_nhalo(field->cs, &nhalo);
for (int p = 1; p < h->nvel; p++) {
int8_t cx = h->cv[p][X];
int8_t cy = h->cv[p][Y];
int8_t cz = h->cv[p][Z];
cs_limits_t send = {1, nlocal[X], 1, nlocal[Y], 1, nlocal[Z]};
cs_limits_t recv = {1, nlocal[X], 1, nlocal[Y], 1, nlocal[Z]};
if (cx == -1) send.imax = nhalo;
if (cx == +1) send.imin = send.imax - (nhalo - 1);
if (cy == -1) send.jmax = nhalo;
if (cy == +1) send.jmin = send.jmax - (nhalo - 1);
if (cz == -1) send.kmax = nhalo;
if (cz == +1) send.kmin = send.kmax - (nhalo - 1);
/* For recv, direction is reversed cf. send */
if (cx == +1) { recv.imin = 1 - nhalo; recv.imax = 0;}
if (cx == -1) { recv.imin = recv.imax + 1; recv.imax = recv.imax + nhalo;}
if (cy == +1) { recv.jmin = 1 - nhalo; recv.jmax = 0;}
if (cy == -1) { recv.jmin = recv.jmax + 1; recv.jmax = recv.jmax + nhalo;}
if (cz == +1) { recv.kmin = 1 - nhalo; recv.kmax = 0;}
if (cz == -1) { recv.kmin = recv.kmax + 1; recv.kmax = recv.kmax + nhalo;}
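/* Worked example with nhalo = 2 and cx = +1: send the two rightmost
 * real planes imin = nlocal[X]-1 .. imax = nlocal[X]; the matching
 * receive (for the opposite direction) fills the left halo
 * imin = -1 .. imax = 0. */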
h->slim[p] = send;
h->rlim[p] = recv;
}
/* Message count and buffers */
for (int p = 1; p < h->nvel; p++) {
int scount = field->nf*field_halo_size(h->slim[p]);
int rcount = field->nf*field_halo_size(h->rlim[p]);
h->send[p] = (double *) calloc(scount, sizeof(double));
h->recv[p] = (double *) calloc(rcount, sizeof(double));
assert(h->send[p]);
assert(h->recv[p]);
}
return 0;
}
/*****************************************************************************
*
* field_halo_post
*
*****************************************************************************/
int field_halo_post(const field_t * field, field_halo_t * h) {
const int tagbase = 2022;
assert(field);
assert(h);
/* Post recvs */
TIMER_start(TIMER_FIELD_HALO_IRECV);
h->request[0] = MPI_REQUEST_NULL;
for (int ireq = 1; ireq < h->nvel; ireq++) {
int i = 1 + h->cv[h->nvel - ireq][X];
int j = 1 + h->cv[h->nvel - ireq][Y];
int k = 1 + h->cv[h->nvel - ireq][Z];
int mcount = field->nf*field_halo_size(h->rlim[ireq]);
h->request[ireq] = MPI_REQUEST_NULL;
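/* A self-neighbour is handled by a zero-size message: the payload is
 * copied directly from the matching send buffer in
 * field_halo_dequeue_recv() instead. */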
if (h->nbrrank[i][j][k] == h->nbrrank[1][1][1]) mcount = 0;
MPI_Irecv(h->recv[ireq], mcount, MPI_DOUBLE, h->nbrrank[i][j][k],
tagbase + ireq, h->comm, h->request + ireq);
}
TIMER_stop(TIMER_FIELD_HALO_IRECV);
/* Load send buffers; post sends */
TIMER_start(TIMER_FIELD_HALO_PACK);
#pragma omp parallel
{
for (int ireq = 1; ireq < h->nvel; ireq++) {
field_halo_enqueue_send(field, h, ireq);
}
}
TIMER_stop(TIMER_FIELD_HALO_PACK);
TIMER_start(TIMER_FIELD_HALO_ISEND);
h->request[27] = MPI_REQUEST_NULL;
for (int ireq = 1; ireq < h->nvel; ireq++) {
int i = 1 + h->cv[ireq][X];
int j = 1 + h->cv[ireq][Y];
int k = 1 + h->cv[ireq][Z];
int mcount = field->nf*field_halo_size(h->slim[ireq]);
if (h->nbrrank[i][j][k] == h->nbrrank[1][1][1]) mcount = 0;
MPI_Isend(h->send[ireq], mcount, MPI_DOUBLE, h->nbrrank[i][j][k],
tagbase + ireq, h->comm, h->request + 27 + ireq);
}
TIMER_stop(TIMER_FIELD_HALO_ISEND);
return 0;
}
/*****************************************************************************
*
* field_halo_wait
*
*****************************************************************************/
int field_halo_wait(field_t * field, field_halo_t * h) {
assert(field);
assert(h);
TIMER_start(TIMER_FIELD_HALO_WAITALL);
MPI_Waitall(2*h->nvel, h->request, MPI_STATUSES_IGNORE);
TIMER_stop(TIMER_FIELD_HALO_WAITALL);
TIMER_start(TIMER_FIELD_HALO_UNPACK);
#pragma omp parallel
{
for (int ireq = 1; ireq < h->nvel; ireq++) {
field_halo_dequeue_recv(field, h, ireq);
}
}
TIMER_stop(TIMER_FIELD_HALO_UNPACK);
return 0;
}
/*****************************************************************************
*
* field_halo_info
*
*****************************************************************************/
int field_halo_info(const field_t * f) {
assert(f);
assert(f->pe);
pe_t * pe = f->pe;
const field_halo_t * h = &f->h;
/* For each direction, send limits */
pe_info(pe, "\n");
pe_info(pe, "Field halo information at root: %s\n", f->name);
pe_info(pe, "\n");
pe_info(pe, "Send requests\n");
pe_info(pe,
"Req (cx cy cz) imin imax jmin jmax kmin kmax bytes\n");
pe_info(pe,
"------------------------------------------------------\n");
for (int ireq = 1; ireq < h->nvel; ireq++) {
pe_info(pe, "%3d (%2d %2d %2d) %4d %4d %4d %4d %4d %4d %9ld\n", ireq,
h->cv[ireq][X], h->cv[ireq][Y], h->cv[ireq][Z],
h->slim[ireq].imin, h->slim[ireq].imax,
h->slim[ireq].jmin, h->slim[ireq].jmax,
h->slim[ireq].kmin, h->slim[ireq].kmax,
(size_t) f->nf*field_halo_size(h->slim[ireq])*sizeof(double));
}
/* Recv limits counts */
pe_info(pe, "\n");
pe_info(pe, "Receive requests\n");
pe_info(pe,
"Req (cx cy cz) imin imax jmin jmax kmin kmax bytes\n");
pe_info(pe,
"------------------------------------------------------\n");
for (int ireq = 1; ireq < h->nvel; ireq++) {
pe_info(pe, "%3d (%2d %2d %2d) %4d %4d %4d %4d %4d %4d %9ld\n", ireq,
h->cv[ireq][X], h->cv[ireq][Y], h->cv[ireq][Z],
h->rlim[ireq].imin, h->rlim[ireq].imax,
h->rlim[ireq].jmin, h->rlim[ireq].jmax,
h->rlim[ireq].kmin, h->rlim[ireq].kmax,
(size_t) f->nf*field_halo_size(h->rlim[ireq])*sizeof(double));
}
return 0;
}
/*****************************************************************************
*
* field_halo_free
*
*****************************************************************************/
int field_halo_free(field_halo_t * h) {
assert(h);
for (int p = 1; p < h->nvel; p++) {
free(h->send[p]);
free(h->recv[p]);
}
*h = (field_halo_t) {0};
return 0;
}
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _EdgeInfo
{
double
left,
right,
top,
bottom;
} EdgeInfo;
static double GetEdgeBackgroundCensus(const Image *image,
const CacheView *image_view,const GravityType gravity,const size_t width,
const size_t height,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
census;
Image
*edge_image;
PixelInfo
background,
pixel;
RectangleInfo
edge_geometry;
const Quantum
*p;
ssize_t
y;
/*
Determine the fraction of pixels along this edge that differ from
the background color.
*/
switch (gravity)
{
case NorthWestGravity:
case NorthGravity:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
break;
}
case NorthEastGravity:
case EastGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
break;
}
case SouthEastGravity:
case SouthGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
break;
}
case SouthWestGravity:
case WestGravity:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
break;
}
}
GetPixelInfoPixel(image,p,&background);
artifact=GetImageArtifact(image,"background");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
artifact=GetImageArtifact(image,"trim:background-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
edge_geometry.width=width;
edge_geometry.height=height;
edge_geometry.x=x_offset;
edge_geometry.y=y_offset;
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
return(0.0);
census=0.0;
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
GetPixelInfoPixel(edge_image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
census++;
p+=GetPixelChannels(edge_image);
}
}
census/=((double) edge_image->columns*edge_image->rows);
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
return(census);
}
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
double
census;
census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top),
edge->bottom);
return(census);
}
static RectangleInfo GetEdgeBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
background_census,
percent_background;
EdgeInfo
edge,
vertex;
Image
*edge_image;
RectangleInfo
bounds;
/*
Get the image bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
SetGeometry(image,&bounds);
edge_image=CloneImage(image,0,0,MagickTrue,exception);
if (edge_image == (Image *) NULL)
return(bounds);
(void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
(void) memset(&vertex,0,sizeof(vertex));
edge_view=AcquireVirtualCacheView(edge_image,exception);
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
1,0,0,0,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
1,0,0,0,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
0,1,0,0,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
0,1,0,0,exception);
percent_background=1.0;
artifact=GetImageArtifact(edge_image,"trim:percent-background");
if (artifact != (const char *) NULL)
percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
1.0);
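/*
  trim:percent-background is the fraction of background allowed to
  remain; 1.0 minus that value (clamped to (MagickEpsilon, 1.0]) is
  the non-background census an edge must reach before trimming stops.
*/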
background_census=GetMinEdgeBackgroundCensus(&edge);
for ( ; background_census < percent_background;
background_census=GetMinEdgeBackgroundCensus(&edge))
{
if ((bounds.width == 0) || (bounds.height == 0))
break;
if (fabs(edge.left-background_census) < MagickEpsilon)
{
/*
Trim left edge.
*/
vertex.left++;
bounds.width--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.right-background_census) < MagickEpsilon)
{
/*
Trim right edge.
*/
vertex.right++;
bounds.width--;
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.top-background_census) < MagickEpsilon)
{
/*
Trim top edge.
*/
vertex.top++;
bounds.height--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
continue;
}
if (fabs(edge.bottom-background_census) < MagickEpsilon)
{
/*
Trim bottom edge.
*/
vertex.bottom++;
bounds.height--;
edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
bounds.x=(ssize_t) vertex.left;
bounds.y=(ssize_t) vertex.top;
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return(bounds);
}
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
PixelInfo
target[4],
zero;
RectangleInfo
bounds;
const Quantum
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"trim:percent-background");
if (artifact != (const char *) NULL)
return(GetEdgeBoundingBox(image,exception));
artifact=GetImageArtifact(image, "trim:edges");
if (artifact == (const char *) NULL)
{
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
}
else
{
char
*edges,
*p,
*q;
bounds.width=(size_t) image->columns;
bounds.height=(size_t) image->rows;
bounds.x=0;
bounds.y=0;
edges=AcquireString(artifact);
q=edges;
while ((p=StringToken(",",&q)) != (char *) NULL)
{
if (LocaleCompare(p,"north") == 0)
bounds.y=(ssize_t) image->rows;
if (LocaleCompare(p,"east") == 0)
bounds.width=0;
if (LocaleCompare(p,"south") == 0)
bounds.height=0;
if (LocaleCompare(p,"west") == 0)
bounds.x=(ssize_t) image->columns;
}
edges=DestroyString(edges);
}
GetPixelInfo(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
GetPixelInfoPixel(image,p,&target[0]);
GetPixelInfo(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[1]);
GetPixelInfo(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[2]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t)
image->rows-1,1,1,exception);
if (p != (const Quantum *) NULL)
GetPixelInfoPixel(image,p,&target[3]);
status=MagickTrue;
GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,p,&pixel);
if ((x < bounding_box.x) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
if ((x < (ssize_t) bounding_box.width) &&
(y > (ssize_t) bounding_box.height) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse))
{
bounding_box.width=(size_t) x;
bounding_box.height=(size_t) y;
}
p+=GetPixelChannels(image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o n v e x H u l l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageConvexHull() returns the convex hull points of an image canvas.
%
% The format of the GetImageConvexHull method is:
%
% PointInfo *GetImageConvexHull(const Image *image,
% size_t *number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the convex hull.
%
% o exception: return any errors or warnings in this structure.
%
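%  For example, a minimal sketch (assuming `image' and `exception' hold a
%  valid image and exception structure):
%
%      size_t number_vertices;
%      ssize_t i;
%      PointInfo *hull;
%
%      hull=GetImageConvexHull(image,&number_vertices,exception);
%      if (hull != (PointInfo *) NULL)
%        {
%          for (i=0; i < (ssize_t) number_vertices; i++)
%            (void) fprintf(stdout,"%.20g,%.20g\n",hull[i].x,hull[i].y);
%          hull=(PointInfo *) RelinquishMagickMemory(hull);
%        }
%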
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
/*
Cross product of vectors (b-a) and (c-a): positive for a counterclockwise
turn, negative for a clockwise turn, and zero for collinear points.
*/
return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x));
}
static PixelInfo GetEdgeBackgroundColor(const Image *image,
const CacheView *image_view,ExceptionInfo *exception)
{
const char
*artifact;
double
census[4],
edge_census;
PixelInfo
background[4],
edge_background;
ssize_t
i;
/*
The most dominant color of the edges/corners is taken as the background color of the image.
*/
artifact=GetImageArtifact(image,"convex-hull:background-color");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"background");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i < 4; i++)
{
CacheView
*edge_view;
GravityType
gravity;
Image
*edge_image;
PixelInfo
pixel;
RectangleInfo
edge_geometry;
const Quantum
*p;
ssize_t
y;
census[i]=0.0;
(void) memset(&edge_geometry,0,sizeof(edge_geometry));
switch (i)
{
case 0:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
gravity=WestGravity;
edge_geometry.width=1;
edge_geometry.height=0;
break;
}
case 1:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
gravity=EastGravity;
edge_geometry.width=1;
edge_geometry.height=0;
break;
}
case 2:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
gravity=NorthGravity;
edge_geometry.width=0;
edge_geometry.height=1;
break;
}
case 3:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
gravity=SouthGravity;
edge_geometry.width=0;
edge_geometry.height=1;
break;
}
}
GetPixelInfoPixel(image,p,background+i);
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,background+i,
exception);
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
continue;
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
GetPixelInfoPixel(edge_image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
census[i]++;
p+=GetPixelChannels(edge_image);
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
}
edge_census=(-1.0);
for (i=0; i < 4; i++)
if (census[i] > edge_census)
{
edge_background=background[i];
edge_census=census[i];
}
return(edge_background);
}
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
PointInfo ***monotone_chain,size_t *chain_length)
{
PointInfo
**chain;
ssize_t
i;
size_t
demark,
n;
/*
Construct the upper and lower hulls: rightmost to leftmost counterclockwise.
*/
chain=(*monotone_chain);
n=0;
for (i=0; i < (ssize_t) number_vertices; i++)
{
while ((n >= 2) &&
(LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
n--;
chain[n++]=(&vertices[i]);
}
demark=n+1;
for (i=(ssize_t) number_vertices-2; i >= 0; i--)
{
while ((n >= demark) &&
(LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
n--;
chain[n++]=(&vertices[i]);
}
*chain_length=n;
}
MagickExport PointInfo *GetImageConvexHull(const Image *image,
size_t *number_vertices,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MemoryInfo
*monotone_info,
*vertices_info;
PixelInfo
background;
PointInfo
*convex_hull,
**monotone_chain,
*vertices;
size_t
n;
ssize_t
y;
/*
Identify convex hull vertices of image foreground object(s).
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*number_vertices=0;
vertices_info=AcquireVirtualMemory(image->columns,image->rows*
sizeof(*vertices));
monotone_info=AcquireVirtualMemory(2*image->columns,2*
image->rows*sizeof(*monotone_chain));
if ((vertices_info == (MemoryInfo *) NULL) ||
(monotone_info == (MemoryInfo *) NULL))
{
if (monotone_info != (MemoryInfo *) NULL)
monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
if (vertices_info != (MemoryInfo *) NULL)
vertices_info=RelinquishVirtualMemory(vertices_info);
return((PointInfo *) NULL);
}
vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
image_view=AcquireVirtualCacheView(image,exception);
background=GetEdgeBackgroundColor(image,image_view,exception);
status=MagickTrue;
n=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
{
vertices[n].x=(double) x;
vertices[n].y=(double) y;
n++;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Return the convex hull of the image foreground object(s).
*/
TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
sizeof(*convex_hull));
if (convex_hull != (PointInfo *) NULL)
for (n=0; n < *number_vertices; n++)
convex_hull[n]=(*monotone_chain[n]);
monotone_info=RelinquishVirtualMemory(monotone_info);
vertices_info=RelinquishVirtualMemory(vertices_info);
return(convex_hull);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
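%  For example, a minimal sketch (assuming a valid `image' and `exception'):
%
%      size_t depth=GetImageDepth(image,exception);
%      (void) fprintf(stdout,"depth: %.20g\n",(double) depth);
%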
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) &&
(image->alpha_trait == UndefinedPixelTrait))
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
size_t
*depth_map;
/*
Compute the pixel depth with a precomputed depth map (non-HDRI fast path).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
/*
Compute pixel depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
QuantumAny
range;
range=GetQuantumRange(current_depth[id]);
if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
break;
current_depth[id]++;
}
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M i n i m u m B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMinimumBoundingBox() returns the points that form the minimum
% bounding box around the image foreground objects with the "Rotating
% Calipers" algorithm. The method also returns these properties:
% minimum-bounding-box:area, minimum-bounding-box:width,
% minimum-bounding-box:height, and minimum-bounding-box:angle.
%
% The format of the GetImageMinimumBoundingBox method is:
%
% PointInfo *GetImageMinimumBoundingBox(Image *image,
% size_t *number_vertices,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_vertices: the number of vertices in the bounding box.
%
% o exception: return any errors or warnings in this structure.
%
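%  For example, a minimal sketch (assuming a valid `image' and `exception';
%  the box metrics are also stored as image properties):
%
%      size_t number_vertices;
%      PointInfo *box;
%
%      box=GetImageMinimumBoundingBox(image,&number_vertices,exception);
%      if (box != (PointInfo *) NULL)
%        {
%          const char *angle=GetImageProperty(image,
%            "minimum-bounding-box:angle",exception);
%          box=(PointInfo *) RelinquishMagickMemory(box);
%        }
%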
*/
typedef struct _CaliperInfo
{
double
area,
width,
height,
projection;
ssize_t
p,
q,
v;
} CaliperInfo;
static inline double getAngle(PointInfo *p,PointInfo *q)
{
/*
Get the angle between line (p,q) and horizontal axis, in degrees.
*/
return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x)));
}
static inline double getDistance(PointInfo *p,PointInfo *q)
{
double
distance;
/*
Return the squared Euclidean distance between p and q.
*/
distance=hypot(p->x-q->x,p->y-q->y);
return(distance*distance);
}
static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
double
distance;
/*
Signed length of the projection of vector (v - p) onto the line passing through p and q.
*/
distance=getDistance(p,q);
if (distance < MagickEpsilon)
return(INFINITY);
return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance);
}
static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
double
distance;
/*
Signed distance from point v to the line passing through p and q.
*/
distance=getDistance(p,q);
if (distance < MagickEpsilon)
return(INFINITY);
return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance);
}
MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
size_t *number_vertices,ExceptionInfo *exception)
{
CaliperInfo
caliper_info;
const char
*artifact;
double
angle,
diameter,
distance;
PointInfo
*bounding_box,
*vertices;
ssize_t
i;
size_t
number_hull_vertices;
/*
Generate the minimum bounding box with the "Rotating Calipers" algorithm.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*number_vertices=0;
vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
if (vertices == (PointInfo *) NULL)
return((PointInfo *) NULL);
*number_vertices=4;
bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
sizeof(*bounding_box));
if (bounding_box == (PointInfo *) NULL)
{
vertices=(PointInfo *) RelinquishMagickMemory(vertices);
return((PointInfo *) NULL);
}
caliper_info.area=2.0*image->columns*image->rows;
caliper_info.width=(double) image->columns+image->rows;
caliper_info.height=0.0;
caliper_info.projection=0.0;
caliper_info.p=(-1);
caliper_info.q=(-1);
caliper_info.v=(-1);
for (i=0; i < (ssize_t) number_hull_vertices; i++)
{
double
area = 0.0,
max_projection = 0.0,
min_diameter = -1.0,
min_projection = 0.0;
ssize_t
j,
k;
ssize_t
p = -1,
q = -1,
v = -1;
for (j=0; j < (ssize_t) number_hull_vertices; j++)
{
double
diameter;
diameter=fabs(getFeretDiameter(&vertices[i],
&vertices[(i+1) % number_hull_vertices],&vertices[j]));
if (min_diameter < diameter)
{
min_diameter=diameter;
p=i;
q=(i+1) % number_hull_vertices;
v=j;
}
}
for (k=0; k < (ssize_t) number_hull_vertices; k++)
{
double
projection;
/*
Rotating calipers.
*/
projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
min_projection=MagickMin(min_projection,projection);
max_projection=MagickMax(max_projection,projection);
}
area=min_diameter*(max_projection-min_projection);
if (caliper_info.area > area)
{
caliper_info.area=area;
caliper_info.width=min_diameter;
caliper_info.height=max_projection-min_projection;
caliper_info.projection=max_projection;
caliper_info.p=p;
caliper_info.q=q;
caliper_info.v=v;
}
}
/*
Initialize minimum bounding box.
*/
diameter=getFeretDiameter(&vertices[caliper_info.p],
&vertices[caliper_info.q],&vertices[caliper_info.v]);
angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
vertices[caliper_info.q].x-vertices[caliper_info.p].x);
bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
caliper_info.projection;
bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
caliper_info.projection;
bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
0.5);
bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
0.5);
bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
0.5);
bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
0.5);
bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
0.5);
bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
0.5);
/*
Export minimum bounding box properties.
*/
(void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
GetMagickPrecision(),caliper_info.area);
(void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
GetMagickPrecision(),caliper_info.width);
(void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
GetMagickPrecision(),caliper_info.height);
(void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
GetMagickPrecision(),vertices[caliper_info.p].x,
GetMagickPrecision(),vertices[caliper_info.p].y);
(void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
GetMagickPrecision(),vertices[caliper_info.q].x,
GetMagickPrecision(),vertices[caliper_info.q].y);
(void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
GetMagickPrecision(),vertices[caliper_info.v].x,
GetMagickPrecision(),vertices[caliper_info.v].y);
/*
Find the bounding-box vertex closest to the origin and take the angle of
its outgoing edge.
*/
distance=hypot(bounding_box[0].x,bounding_box[0].y);
angle=getAngle(&bounding_box[0],&bounding_box[1]);
for (i=1; i < 4; i++)
{
double d = hypot(bounding_box[i].x,bounding_box[i].y);
if (d < distance)
{
distance=d;
angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
}
}
artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
if (artifact != (const char *) NULL)
{
double
length,
q_length,
p_length;
PointInfo
delta,
point;
/*
Find smallest perpendicular distance from edge to origin.
*/
point=bounding_box[0];
for (i=1; i < 4; i++)
{
if (bounding_box[i].x < point.x)
point.x=bounding_box[i].x;
if (bounding_box[i].y < point.y)
point.y=bounding_box[i].y;
}
for (i=0; i < 4; i++)
{
bounding_box[i].x-=point.x;
bounding_box[i].y-=point.y;
}
for (i=0; i < 4; i++)
{
double
d,
intercept,
slope;
delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
slope=delta.y*PerceptibleReciprocal(delta.x);
intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
PerceptibleReciprocal(sqrt(slope*slope+1.0)));
if ((i == 0) || (d < distance))
{
distance=d;
point=delta;
}
}
angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
length=hypot(point.x,point.y);
p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
length);
q_length=fabs(length-(double) MagickMin(caliper_info.width,
caliper_info.height));
if (LocaleCompare(artifact,"landscape") == 0)
{
if (p_length > q_length)
angle+=(angle < 0.0) ? 90.0 : -90.0;
}
else
if (LocaleCompare(artifact,"portrait") == 0)
{
if (p_length < q_length)
angle+=(angle >= 0.0) ? 90.0 : -90.0;
}
}
(void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
GetMagickPrecision(),angle);
(void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
GetMagickPrecision(),-angle);
vertices=(PointInfo *) RelinquishMagickMemory(vertices);
return(bounding_box);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: a value other than MagickFalse constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
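%  For example, a minimal sketch (assuming a valid `image'): an image with
%  a depth of 12 reports a quantum depth of 16 (when built with
%  MAGICKCORE_QUANTUM_DEPTH >= 16):
%
%      size_t quantum_depth=GetImageQuantumDepth(image,MagickTrue);
%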
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
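%  For example, a minimal sketch (assuming a valid `image'):
%
%      if (GetImageType(image) == GrayscaleType)
%        (void) fprintf(stdout,"%s is grayscale\n",image->filename);
%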
*/
MagickExport ImageType GetImageType(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->colorspace == CMYKColorspace)
{
if (image->alpha_trait == UndefinedPixelTrait)
return(ColorSeparationType);
return(ColorSeparationAlphaType);
}
if (IsImageMonochrome(image) != MagickFalse)
return(BilevelType);
if (IsImageGray(image) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(GrayscaleAlphaType);
return(GrayscaleType);
}
if (IsPaletteImage(image) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(PaletteAlphaType);
return(PaletteType);
}
if (image->alpha_trait != UndefinedPixelTrait)
return(TrueColorAlphaType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
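%  For example, a minimal sketch (assuming a valid `image' and `exception'):
%
%      ImageType type=IdentifyImageGray(image,exception);
%      if (type != UndefinedType)
%        (void) fprintf(stdout,"pixels are gray or bi-level\n");
%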
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
const Quantum
*p;
ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleAlphaType))
return(image->type);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(UndefinedType);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelGray(image,p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) &&
(IsPixelMonochrome(image,p) == MagickFalse))
type=GrayscaleType;
p+=GetPixelChannels(image);
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
type=GrayscaleAlphaType;
return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
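%  For example, a minimal sketch (assuming a valid `image' and `exception'):
%
%      if (IdentifyImageMonochrome(image,exception) != MagickFalse)
%        (void) fprintf(stdout,"pixels are bi-level\n");
%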
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
bilevel;
ssize_t
x;
const Quantum
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
bilevel=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelMonochrome(image,p) == MagickFalse)
{
bilevel=MagickFalse;
break;
}
p+=GetPixelChannels(image);
}
if (bilevel == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
return(bilevel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->alpha_trait == UndefinedPixelTrait)
return(ColorSeparationType);
return(ColorSeparationAlphaType);
}
if (IdentifyImageMonochrome(image,exception) != MagickFalse)
return(BilevelType);
if (IdentifyImageGray(image,exception) != UndefinedType)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(GrayscaleAlphaType);
return(GrayscaleType);
}
if (IdentifyPaletteImage(image,exception) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(PaletteAlphaType);
return(PaletteType);
}
if (image->alpha_trait != UndefinedPixelTrait)
return(TrueColorAlphaType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleAlphaType))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if the type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Returns MagickTrue immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
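%  For example, a minimal sketch (assuming a valid `image' and `exception'):
%
%      if (IsImageOpaque(image,exception) != MagickFalse)
%        (void) fprintf(stdout,"no transparent pixels\n");
%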
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const Quantum
*p;
ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelAlpha(image,p) != OpaqueAlpha)
break;
p+=GetPixelChannels(image);
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
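%  For example, a minimal sketch (assuming a valid `image' and `exception')
%  that reduces the image to 8 bits of resolution per channel:
%
%      MagickBooleanType status=SetImageDepth(image,8,exception);
%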
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=depth;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].red),range),range);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].green),range),range);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].blue),range),range);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].alpha),range),range);
}
}
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
Quantum
*depth_map;
ssize_t
i;
/*
Scale pixels to the desired depth (optimized with a depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=depth_map[ScaleQuantumToMap(q[i])];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
q[i]),range),range);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
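%  For example, a minimal sketch (assuming a valid `image' and `exception')
%  that quantizes the image down to a 256-color palette:
%
%      MagickBooleanType status=SetImageType(image,PaletteType,exception);
%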
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
ExceptionInfo *exception)
{
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
image_info=AcquireImageInfo();
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
status=TransformImageColorspace(image,GRAYColorspace,exception);
(void) NormalizeImage(image,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->colorspace=GRAYColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleType:
{
status=TransformImageColorspace(image,GRAYColorspace,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleAlphaType:
{
status=TransformImageColorspace(image,GRAYColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case PaletteType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->alpha_trait=UndefinedPixelTrait;
break;
}
case PaletteBilevelAlphaType:
{
ChannelType
channel_mask;
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
channel_mask=SetImageChannelMask(image,AlphaChannel);
(void) BilevelImage(image,(double) QuantumRange/2.0,exception);
(void) SetImageChannelMask(image,channel_mask);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteAlphaType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case TrueColorAlphaType:
{
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case ColorSeparationType:
{
status=TransformImageColorspace(image,CMYKColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case ColorSeparationAlphaType:
{
status=TransformImageColorspace(image,CMYKColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case OptimizeType:
case UndefinedType:
break;
}
image_info=DestroyImageInfo(image_info);
if (status == MagickFalse)
return(status);
image->type=type;
return(MagickTrue);
}
|
nmt_field_flat.c | #include "utils.h"
nmt_k_function *nmt_k_function_alloc(int nk,flouble *karr,flouble *farr,flouble y0,flouble yf,int is_const)
{
nmt_k_function *f=my_malloc(sizeof(nmt_k_function));
f->is_const=is_const;
f->y0=y0;
if(!is_const) {
f->x0=karr[0];
f->xf=karr[nk-1];
f->yf=yf;
f->spl=gsl_spline_alloc(gsl_interp_linear,nk);
gsl_spline_init(f->spl,karr,farr,nk);
}
return f;
}
void nmt_k_function_free(nmt_k_function *f)
{
if(!(f->is_const))
gsl_spline_free(f->spl);
free(f);
}
flouble nmt_k_function_eval(nmt_k_function *f,flouble k,gsl_interp_accel *intacc)
{
if((f->is_const) || (k<=f->x0))
return f->y0;
else if(k>=f->xf)
return f->yf;
else
return gsl_spline_eval(f->spl,k,intacc);
}
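/* A minimal usage sketch (illustrative values, not from the library docs;
 * assumes GSL is linked): build a linear interpolator over sampled k
 * values, evaluate it, then free it.
 *
 *   flouble karr[3]={0.1,1.0,10.0};
 *   flouble farr[3]={1.0,0.5,0.1};
 *   gsl_interp_accel *acc=gsl_interp_accel_alloc();
 *   nmt_k_function *f=nmt_k_function_alloc(3,karr,farr,farr[0],farr[2],0);
 *   flouble fk=nmt_k_function_eval(f,2.5,acc); //linear interpolation in k
 *   nmt_k_function_free(f);
 *   gsl_interp_accel_free(acc);
 */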
#define N_DELL 1
nmt_flatsky_info *nmt_flatsky_info_alloc(int nx,int ny,flouble lx,flouble ly)
{
nmt_flatsky_info *fs=my_malloc(sizeof(nmt_flatsky_info));
fs->nx=nx;
fs->ny=ny;
fs->npix=nx*ny;
fs->lx=lx;
fs->ly=ly;
fs->pixsize=lx*ly/(nx*ny);
int ii;
flouble dkx=2*M_PI/lx;
flouble dky=2*M_PI/ly;
flouble kmax_x=dkx*(nx/2);
flouble kmax_y=dky*(ny/2);
double dk=NMT_MIN(dkx,dky);
double kmax=sqrt(kmax_y*kmax_y+kmax_x*kmax_x);
fs->dell=N_DELL*dk;
fs->i_dell=1./fs->dell;
fs->n_ell=0;
while((fs->n_ell+1)*fs->dell<=kmax)
fs->n_ell++;
fs->ell_min=my_malloc(fs->n_ell*sizeof(flouble));
for(ii=0;ii<fs->n_ell;ii++)
fs->ell_min[ii]=ii*fs->dell;
return fs;
}
void nmt_flatsky_info_free(nmt_flatsky_info *fs)
{
free(fs->ell_min);
free(fs);
}
void nmt_field_flat_free(nmt_field_flat *fl)
{
int imap,itemp;
nmt_k_function_free(fl->beam);
nmt_flatsky_info_free(fl->fs);
for(imap=0;imap<fl->nmaps;imap++) {
dftw_free(fl->maps[imap]);
dftw_free(fl->alms[imap]);
}
dftw_free(fl->mask);
if(fl->ntemp>0) {
for(itemp=0;itemp<fl->ntemp;itemp++) {
for(imap=0;imap<fl->nmaps;imap++) {
dftw_free(fl->temp[itemp][imap]);
dftw_free(fl->a_temp[itemp][imap]);
}
}
}
free(fl->alms);
free(fl->maps);
if(fl->ntemp>0) {
for(itemp=0;itemp<fl->ntemp;itemp++) {
free(fl->temp[itemp]);
free(fl->a_temp[itemp]);
}
free(fl->temp);
free(fl->a_temp);
gsl_matrix_free(fl->matrix_M);
}
if(fl->a_mask!=NULL) {
for(imap=0;imap<fl->nmaps;imap++)
dftw_free(fl->a_mask[imap]);
free(fl->a_mask);
}
free(fl);
}
static void walm_x_lpower(nmt_flatsky_info *fs,fcomplex **walm_in,fcomplex **walm_out,int power)
{
#pragma omp parallel default(none) \
shared(fs,walm_in,walm_out,power)
{
int iy;
flouble dkx=2*M_PI/fs->lx;
flouble dky=2*M_PI/fs->ly;
#pragma omp for
for(iy=0;iy<fs->ny;iy++) {
int ix;
flouble ky;
if(2*iy<=fs->ny)
ky=iy*dky;
else
ky=-(fs->ny-iy)*dky;
for(ix=0;ix<=fs->nx/2;ix++) {
int ipow;
flouble kpow=1;
flouble kx=ix*dkx;
long index=ix+(fs->nx/2+1)*iy;
flouble kmod=sqrt(kx*kx+ky*ky);
for(ipow=0;ipow<power;ipow++)
kpow*=kmod;
walm_out[0][index]=-walm_in[0][index]*kpow;
walm_out[1][index]=0;
}
} //end omp for
} //end omp parallel
}
void nmt_purify_flat(nmt_field_flat *fl,flouble *mask,fcomplex **walm0,
flouble **maps_in,flouble **maps_out,fcomplex **alms)
{
long ip;
int imap;
int purify[2]={0,0};
flouble **pmap=my_malloc(fl->nmaps*sizeof(flouble *));
flouble **wmap=my_malloc(fl->nmaps*sizeof(flouble *));
fcomplex **walm=my_malloc(fl->nmaps*sizeof(fcomplex *));
fcomplex **palm=my_malloc(fl->nmaps*sizeof(fcomplex *));
fcomplex **alm_out=my_malloc(fl->nmaps*sizeof(fcomplex *));
for(imap=0;imap<fl->nmaps;imap++) {
pmap[imap]=dftw_malloc(fl->npix*sizeof(flouble));
wmap[imap]=dftw_malloc(fl->npix*sizeof(flouble));
walm[imap]=dftw_malloc(fl->fs->ny*(fl->fs->nx/2+1)*sizeof(fcomplex));
palm[imap]=dftw_malloc(fl->fs->ny*(fl->fs->nx/2+1)*sizeof(fcomplex));
for(ip=0;ip<fl->fs->ny*(fl->fs->nx/2+1);ip++)
walm[imap][ip]=walm0[imap][ip];
alm_out[imap]=dftw_malloc(fl->fs->ny*(fl->fs->nx/2+1)*sizeof(fcomplex));
}
if(fl->pure_e)
purify[0]=1;
if(fl->pure_b)
purify[1]=1;
//Product with spin-0 mask
for(imap=0;imap<fl->nmaps;imap++)
fs_map_product(fl->fs,maps_in[imap],mask,pmap[imap]);
//Compute SHT and store in alm_out
fs_map2alm(fl->fs,1,2,pmap,alm_out);
//Compute spin-1 mask
walm_x_lpower(fl->fs,walm0,walm,1);
fs_alm2map(fl->fs,1,1,wmap,walm);
//Product with spin-1 mask
for(ip=0;ip<fl->npix;ip++) {
pmap[0][ip]=wmap[0][ip]*maps_in[0][ip]+wmap[1][ip]*maps_in[1][ip];
pmap[1][ip]=wmap[0][ip]*maps_in[1][ip]-wmap[1][ip]*maps_in[0][ip];
}
//Compute DFT, multiply by 2/l and add to alm_out
fs_map2alm(fl->fs,1,1,pmap,palm);
for(imap=0;imap<fl->nmaps;imap++) {
if(purify[imap]) {
#pragma omp parallel default(none) shared(fl,imap,alm_out,palm)
{
int iy;
flouble dkx=2*M_PI/fl->fs->lx;
flouble dky=2*M_PI/fl->fs->ly;
#pragma omp for
for(iy=0;iy<fl->fs->ny;iy++) {
int ix;
flouble ky;
if(2*iy<=fl->fs->ny)
ky=iy*dky;
else
ky=-(fl->fs->ny-iy)*dky;
for(ix=0;ix<=fl->fs->nx/2;ix++) {
flouble kx=ix*dkx;
long index=ix+(fl->fs->nx/2+1)*iy;
flouble kmod=sqrt(kx*kx+ky*ky);
if(kmod>0)
alm_out[imap][index]+=2*palm[imap][index]/kmod;
}
} //end omp for
} //end omp parallel
}
}
//Compute spin-2 mask
walm_x_lpower(fl->fs,walm0,walm,2);
fs_alm2map(fl->fs,1,2,wmap,walm);
//Product with spin-2 mask
for(ip=0;ip<fl->npix;ip++) { //Extra minus sign because of the scalar SHT below
pmap[0][ip]=-1*(wmap[0][ip]*maps_in[0][ip]+wmap[1][ip]*maps_in[1][ip]);
pmap[1][ip]=-1*(wmap[0][ip]*maps_in[1][ip]-wmap[1][ip]*maps_in[0][ip]);
}
//Compute DFT, multiply by 1/l^2 and add to alm_out
fs_map2alm(fl->fs,2,0,pmap,palm);
for(imap=0;imap<fl->nmaps;imap++) {
if(purify[imap]) {
#pragma omp parallel default(none) shared(fl,imap,alm_out,palm)
{
int iy;
flouble dkx=2*M_PI/fl->fs->lx;
flouble dky=2*M_PI/fl->fs->ly;
#pragma omp for
for(iy=0;iy<fl->fs->ny;iy++) {
int ix;
flouble ky;
if(2*iy<=fl->fs->ny)
ky=iy*dky;
else
ky=-(fl->fs->ny-iy)*dky;
for(ix=0;ix<=fl->fs->nx/2;ix++) {
flouble kx=ix*dkx;
long index=ix+(fl->fs->nx/2+1)*iy;
flouble kmod2=kx*kx+ky*ky;
if(kmod2>0)
alm_out[imap][index]+=palm[imap][index]/kmod2;
}
} //end omp for
} //end omp parallel
}
}
for(imap=0;imap<fl->nmaps;imap++) {
for(ip=0;ip<fl->fs->ny*(fl->fs->nx/2+1);ip++)
alms[imap][ip]=alm_out[imap][ip];
}
fs_alm2map(fl->fs,1,2,maps_out,alm_out);
for(imap=0;imap<fl->nmaps;imap++) {
dftw_free(pmap[imap]);
dftw_free(wmap[imap]);
dftw_free(palm[imap]);
dftw_free(walm[imap]);
dftw_free(alm_out[imap]);
}
free(pmap);
free(wmap);
free(palm);
free(walm);
free(alm_out);
}
nmt_field_flat *nmt_field_flat_alloc(int nx,int ny,flouble lx,flouble ly,
flouble *mask,int pol,flouble **maps,int ntemp,flouble ***temp,
int nl_beam,flouble *l_beam,flouble *beam,
int pure_e,int pure_b,double tol_pinv)
{
long ip;
int ii,itemp,itemp2,imap;
nmt_field_flat *fl=my_malloc(sizeof(nmt_field_flat));
fl->fs=nmt_flatsky_info_alloc(nx,ny,lx,ly);
fl->npix=nx*ny;
fl->pol=pol;
if(pol) fl->nmaps=2;
else fl->nmaps=1;
fl->ntemp=ntemp;
fl->pure_e=0;
fl->pure_b=0;
if(pol) {
if(pure_e)
fl->pure_e=1;
if(pure_b)
fl->pure_b=1;
}
if(beam==NULL)
fl->beam=nmt_k_function_alloc(-1,NULL,NULL,1.,1.,1);
else
fl->beam=nmt_k_function_alloc(nl_beam,l_beam,beam,beam[0],0.,0);
fl->mask=dftw_malloc(fl->npix*sizeof(flouble));
for(ip=0;ip<fl->npix;ip++)
fl->mask[ip]=mask[ip];
fl->maps=my_malloc(fl->nmaps*sizeof(flouble *));
for(ii=0;ii<fl->nmaps;ii++)
fl->maps[ii]=dftw_malloc(fl->npix*sizeof(flouble));
if(fl->ntemp>0) {
fl->temp=my_malloc(fl->ntemp*sizeof(flouble **));
for(itemp=0;itemp<fl->ntemp;itemp++) {
fl->temp[itemp]=my_malloc(fl->nmaps*sizeof(flouble *));
for(imap=0;imap<fl->nmaps;imap++) {
fl->temp[itemp][imap]=dftw_malloc(fl->npix*sizeof(flouble));
fs_map_product(fl->fs,temp[itemp][imap],fl->mask,fl->temp[itemp][imap]); //Multiply by mask
}
}
//Compute normalization matrix
fl->matrix_M=gsl_matrix_alloc(fl->ntemp,fl->ntemp);
for(itemp=0;itemp<fl->ntemp;itemp++) {
for(itemp2=itemp;itemp2<fl->ntemp;itemp2++) {
flouble matrix_element=0;
for(imap=0;imap<fl->nmaps;imap++)
matrix_element+=fs_map_dot(fl->fs,fl->temp[itemp][imap],fl->temp[itemp2][imap]);
gsl_matrix_set(fl->matrix_M,itemp,itemp2,matrix_element);
if(itemp2!=itemp)
gsl_matrix_set(fl->matrix_M,itemp2,itemp,matrix_element);
}
}
moore_penrose_pinv(fl->matrix_M,tol_pinv);
//Deproject
for(ii=0;ii<fl->nmaps;ii++)
fs_map_product(fl->fs,maps[ii],fl->mask,fl->maps[ii]);
flouble *prods=my_calloc(fl->ntemp,sizeof(flouble));
for(itemp=0;itemp<fl->ntemp;itemp++) {
for(imap=0;imap<fl->nmaps;imap++)
prods[itemp]+=fs_map_dot(fl->fs,fl->temp[itemp][imap],fl->maps[imap]);
}
for(itemp=0;itemp<fl->ntemp;itemp++) {
flouble alpha=0;
for(itemp2=0;itemp2<fl->ntemp;itemp2++) {
double mij=gsl_matrix_get(fl->matrix_M,itemp,itemp2);
alpha+=mij*prods[itemp2];
}
#ifdef _DEBUG
printf("alpha_%d = %lE\n",itemp,alpha);
#endif //_DEBUG
for(imap=0;imap<fl->nmaps;imap++) {
long ip;
for(ip=0;ip<fl->npix;ip++)
maps[imap][ip]-=alpha*temp[itemp][imap][ip]; //Correct unmasked field (in case of purification)
}
}
free(prods);
}
fl->alms=my_malloc(fl->nmaps*sizeof(fcomplex *));
for(ii=0;ii<fl->nmaps;ii++)
fl->alms[ii]=dftw_malloc(fl->fs->ny*(fl->fs->nx/2+1)*sizeof(fcomplex));
if(fl->ntemp>0) {
fl->a_temp=my_malloc(fl->ntemp*sizeof(fcomplex **));
for(itemp=0;itemp<fl->ntemp;itemp++) {
fl->a_temp[itemp]=my_malloc(fl->nmaps*sizeof(fcomplex *));
for(imap=0;imap<fl->nmaps;imap++)
fl->a_temp[itemp][imap]=dftw_malloc(fl->fs->ny*(fl->fs->nx/2+1)*sizeof(fcomplex));
}
}
if(fl->pol && (fl->pure_e || fl->pure_b)) {
//If purification is needed:
// 1- Compute mask alms
// 2- Purify de-contaminated map
// 3- Compute purified contaminants
//Compute mask DFT (store in fl->a_mask)
fl->a_mask=my_malloc(fl->nmaps*sizeof(fcomplex *));
for(imap=0;imap<fl->nmaps;imap++)
fl->a_mask[imap]=dftw_malloc(fl->fs->ny*(fl->fs->nx/2+1)*sizeof(fcomplex));
fs_map2alm(fl->fs,1,0,&(fl->mask),fl->a_mask);
//Purify map
nmt_purify_flat(fl,fl->mask,fl->a_mask,maps,fl->maps,fl->alms);
//Compute purified contaminant DFTs
if(fl->ntemp>0) {
for(itemp=0;itemp<fl->ntemp;itemp++) {
nmt_purify_flat(fl,fl->mask,fl->a_mask,temp[itemp],fl->temp[itemp],fl->a_temp[itemp]);
for(imap=0;imap<fl->nmaps;imap++) //Store non-pure map
fs_map_product(fl->fs,temp[itemp][imap],fl->mask,fl->temp[itemp][imap]);
}
//IMPORTANT: at this stage, fl->maps and fl->alms contain the purified map and SH coefficients
// However, although fl->a_temp contains the purified SH coefficients,
// fl->temp contains the ***non-purified*** maps. This is to speed up the calculation
// of the deprojection bias.
}
}
else {
//If no purification, just multiply by mask and SHT
fl->a_mask=NULL; //No need to store extra-pure mask harmonic coefficients
//Masked map and spherical harmonic coefficients
for(imap=0;imap<fl->nmaps;imap++)
fs_map_product(fl->fs,maps[imap],fl->mask,fl->maps[imap]);
fs_map2alm(fl->fs,1,2*fl->pol,fl->maps,fl->alms);
//Compute template DFT too
if(fl->ntemp>0) {
for(itemp=0;itemp<fl->ntemp;itemp++)
fs_map2alm(fl->fs,1,2*fl->pol,fl->temp[itemp],fl->a_temp[itemp]);
}
}
return fl;
}
flouble **nmt_synfast_flat(int nx,int ny,flouble lx,flouble ly,int nfields,int *spin_arr,
int nl_beam,flouble *l_beam,flouble **beam_fields,
int nl_cell,flouble *l_cell,flouble **cell_fields,
int seed)
{
int ifield,imap;
int nmaps=0,ncls=0;
long npix=nx*ny;
nmt_k_function **beam,**cell;
flouble **maps;
fcomplex **alms;
nmt_flatsky_info *fs=nmt_flatsky_info_alloc(nx,ny,lx,ly);
for(ifield=0;ifield<nfields;ifield++) {
int nmp=1;
if(spin_arr[ifield]) nmp=2;
nmaps+=nmp;
}
imap=0;
beam=my_malloc(nmaps*sizeof(nmt_k_function *));
maps=my_malloc(nmaps*sizeof(flouble *));
for(ifield=0;ifield<nfields;ifield++) {
int imp,nmp=1;
if(spin_arr[ifield]) nmp=2;
for(imp=0;imp<nmp;imp++) {
beam[imap+imp]=nmt_k_function_alloc(nl_beam,l_beam,beam_fields[ifield],beam_fields[ifield][0],0.,0);
maps[imap+imp]=dftw_malloc(npix*sizeof(flouble));
}
imap+=nmp;
}
ncls=(nmaps*(nmaps+1))/2;
cell=my_malloc(ncls*sizeof(nmt_k_function *));
for(imap=0;imap<ncls;imap++)
cell[imap]=nmt_k_function_alloc(nl_cell,l_cell,cell_fields[imap],cell_fields[imap][0],0.,0);
alms=fs_synalm(nx,ny,lx,ly,nmaps,cell,beam,seed);
for(imap=0;imap<nmaps;imap++)
nmt_k_function_free(beam[imap]);
free(beam);
for(imap=0;imap<ncls;imap++)
nmt_k_function_free(cell[imap]);
free(cell);
imap=0;
for(ifield=0;ifield<nfields;ifield++) {
int imp,nmp=1;
if(spin_arr[ifield]) nmp=2;
fs_alm2map(fs,1,spin_arr[ifield],&(maps[imap]),&(alms[imap]));
for(imp=0;imp<nmp;imp++)
dftw_free(alms[imap+imp]);
imap+=nmp;
}
free(alms);
nmt_flatsky_info_free(fs);
return maps;
}
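/* Illustrative usage sketch for nmt_synfast_flat; the grid, patch size, node
 * values and seed below are made-up example numbers, and the sketch assumes
 * three nodes are enough for the internal k-function interpolation. */
static void example_synfast_usage(void)
{
int spin=0; //one spin-0 field -> one map and one power spectrum
flouble larr[3]={0.,5000.,10000.};
flouble beam[3]={1.,1.,1.}; //unit beam
flouble cell[3]={1E-3,1E-3,1E-3}; //flat power spectrum
flouble *beam_f[1]={beam};
flouble *cell_f[1]={cell};
flouble **maps=nmt_synfast_flat(128,128,0.1,0.1,1,&spin,
3,larr,beam_f,3,cell_f,1234);
dftw_free(maps[0]); //each map was allocated with dftw_malloc
free(maps);
}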
|
dlacpy.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlacpy.c, normal z -> d, Fri Sep 28 17:38:07 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup plasma_lacpy
*
* Copies general rectangular or upper or lower triangular part of
* a two-dimensional m-by-n matrix A to another m-by-n matrix B.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the part of the matrix A to be copied to B.
* - PlasmaGeneral: General rectangular matrix A
* - PlasmaUpper: Upper triangular part of A
* - PlasmaLower: Lower triangular part of A
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] pA
* The m-by-n matrix A. If uplo = PlasmaUpper, only the upper trapezium
* is accessed; if uplo = PlasmaLower, only the lower trapezium is
* accessed.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] pB
* The m-by-n matrix B.
* On exit, B = A in the locations specified by uplo.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dlacpy
* @sa plasma_clacpy
* @sa plasma_dlacpy
* @sa plasma_slacpy
*
******************************************************************************/
int plasma_dlacpy(plasma_enum_t uplo, plasma_enum_t transa,
int m, int n,
double *pA, int lda,
double *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaGeneral) &&
(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
return -2;
}
if (m < 0) {
plasma_error("illegal value of m");
return -3;
}
if (n < 0) {
plasma_error("illegal value of n");
return -4;
}
if (transa != PlasmaNoTrans && m != n) {
plasma_error("illegal value of m and n");
return -3;
}
if (lda < imax(1, m)) {
plasma_error("illegal value of lda");
return -6;
}
if (ldb < imax(1, (transa == PlasmaNoTrans ? m : n))) {
plasma_error("illegal value of ldb");
return -8;
}
// quick return
if (imin(n, m) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_lacpy(plasma, PlasmaRealDouble, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A, B;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_general_desc_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, n, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_general_desc_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call tile async function.
plasma_omp_dlacpy(uplo, transa, A, B, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
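/* Illustrative calling sketch (not part of PLASMA): assumes the library was
 * set up with plasma_init(); sizes and contents are arbitrary example values. */
static void example_dlacpy_usage(void)
{
int m = 4, n = 3;
double A[12], B[12];
for (int i = 0; i < m*n; ++i)
A[i] = (double)i;
// Copy all of A (column-major, leading dimension m) into B.
int info = plasma_dlacpy(PlasmaGeneral, PlasmaNoTrans, m, n, A, m, B, m);
if (info != PlasmaSuccess) {
// handle the error
}
}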
/***************************************************************************//**
*
* @ingroup plasma_lacpy
*
* Copies general rectangular or upper or lower triangular part of
* a two-dimensional m-by-n matrix A to another m-by-n matrix B. Non-blocking
* tile version of plasma_dlacpy(). May return before the computation is
* finished. Operates on matrices stored by tiles. All matrices are passed
* through descriptors. All dimensions are taken from the descriptors. Allows
* for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the part of the matrix A to be copied to B.
* - PlasmaGeneral: General rectangular matrix A
* - PlasmaUpper: Upper triangular part of A
* - PlasmaLower: Lower triangular part of A
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check the
* sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dlacpy
* @sa plasma_omp_clacpy
* @sa plasma_omp_dlacpy
* @sa plasma_omp_slacpy
*
******************************************************************************/
void plasma_omp_dlacpy(plasma_enum_t uplo, plasma_enum_t transa,
plasma_desc_t A, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((uplo != PlasmaGeneral) &&
(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (imin(A.m, A.n) == 0)
return;
// Call the parallel function.
plasma_pdlacpy(uplo, transa, A, B, sequence, request);
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
char A_PART = A[i*lda+k];
if(A_PART){
for(j = 0; j < N; ++j){
C[i*ldc+j] += B[k*ldb+j];
}
} else {
for(j = 0; j < N; ++j){
C[i*ldc+j] -= B[k*ldb+j];
}
}
}
}
}
float *random_matrix(int rows, int cols)
{
int i;
float *m = calloc(rows*cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<10; ++i){
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
#if (defined(__AVX__) && defined(__x86_64__)) || defined(_WIN64)
#define OSXSAVEFlag (1UL<<27)
#define AVXFlag ((1UL<<28)|OSXSAVEFlag)
#define FMAFlag ((1UL<<12)|AVXFlag|OSXSAVEFlag)
#define CLMULFlag ((1UL<< 1)|AVXFlag|OSXSAVEFlag)
#define VAESFlag ((1UL<<25)|AVXFlag|OSXSAVEFlag)
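// Masks over CPUID leaf-1 ECX feature bits (OSXSAVE, AVX, FMA, PCLMUL, AES-NI).
// Note this only tests the CPUID bits; a fully robust AVX check would also
// query XGETBV to confirm the OS actually saves YMM state.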
#include <stdint.h>
#ifdef _WIN64
#include <intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#else // Linux GCC/Clang
#include <x86intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <cpuid.h>
void asm_cpuid(uint32_t* abcd, uint32_t eax)
{
uint32_t ebx = 0, edx = 0, ecx = 0;
// EBX is saved to EDI and later restored
__asm__("movl %%ebx, %%edi;"
"cpuid;"
"xchgl %%ebx, %%edi;"
: "=D"(ebx),
"+a"(eax), "+c"(ecx), "=d"(edx));
abcd[0] = eax;
abcd[1] = ebx;
abcd[2] = ecx;
abcd[3] = edx;
}
#endif
int simd_detect_x86(unsigned int idFeature)
{
uint32_t regs[4]; // EAX, EBX, ECX, EDX;
#ifdef _WIN32
__cpuid(regs, 0);
if (regs[0] > 1U) __cpuid(regs, 1);
#else
__get_cpuid(0, &regs[0], &regs[1], &regs[2], &regs[3]);
if(regs[0] > 1U) __get_cpuid(1, &regs[0], &regs[1], &regs[2], &regs[3]);
#endif
if ((regs[2] & idFeature) != idFeature)
return 0;
return 1;
}
int is_fma_avx() {
static int result = -1;
if (result == -1) {
result = simd_detect_x86(AVXFlag);
if (result == 1) printf(" Used AVX \n");
else printf(" Not used AVX \n");
}
return result;
}
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
if (is_fma_avx() == 1) { // AVX
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
float A_PART = ALPHA*A[i*lda + k];
__m256 a256, b256, c256, result256; // AVX
a256 = _mm256_set1_ps(A_PART);
for (j = 0; j < N - 8; j += 8) {
b256 = _mm256_loadu_ps(&B[k*ldb + j]);
c256 = _mm256_loadu_ps(&C[i*ldc + j]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//result256 = _mm256_fmadd_ps(a256, b256, c256);
result256 = _mm256_mul_ps(a256, b256);
result256 = _mm256_add_ps(result256, c256);
_mm256_storeu_ps(&C[i*ldc + j], result256);
}
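// Scalar tail: finish the columns the 8-wide vector loop did not cover,
// starting at the largest multiple of 8 below N (N - 8 if N is a multiple of 8).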
int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
for (j = prev_end; j < N; ++j)
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
else {
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
/* // SSE
__m128 a128, b128, c128, result128; // SSE
a128 = _mm_set1_ps(A_PART);
for (j = 0; j < N - 4; j += 4) {
b128 = _mm_loadu_ps(&B[k*ldb + j]);
c128 = _mm_loadu_ps(&C[i*ldc + j]);
//result128 = _mm_fmadd_ps(a128, b128, c128);
result128 = _mm_mul_ps(a128, b128);
result128 = _mm_add_ps(result128, c128);
_mm_storeu_ps(&C[i*ldc + j], result128);
}
int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4;
for (j = prev_end; j < N; ++j){
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
*/
}
}
}
}
#else
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
#endif // (__AVX__ && __x86_64__) || _WIN64
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[k*lda+i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
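// Parallelize over rows of C: each OpenMP thread computes one row by calling
// the corresponding scalar kernel with M = 1 and the A/C pointers offset to row t.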
int t;
#pragma omp parallel for
for (t = 0; t < M; ++t) {
if (!TA && !TB)
gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else if (TA && !TB)
gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
else if (!TA && TB)
gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else
gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
}
}
#ifdef GPU
#include <math.h>
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
cudaError_t stream_status = cublasSetStream(handle, get_cuda_stream());
cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
check_error(status);
}
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
float *C_gpu = cuda_make_array(C, ldc*M);
gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);
cuda_pull_array(C_gpu, C, ldc*M);
cuda_free(A_gpu);
cuda_free(B_gpu);
cuda_free(C_gpu);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void time_ongpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
cudaThreadSynchronize();
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,192,729,1600);
time_ongpu(0,0,384,196,1728);
time_ongpu(0,0,256,196,3456);
time_ongpu(0,0,256,196,2304);
time_ongpu(0,0,128,4096,12544);
time_ongpu(0,0,128,4096,4096);
*/
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,576,12544);
time_ongpu(0,0,256,2304,784);
time_ongpu(1,1,2304,256,784);
time_ongpu(0,0,512,4608,196);
time_ongpu(1,1,4608,512,196);
return 0;
}
#endif
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "dark_cuda.h"
#include "box.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define NUMCHARS 37
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
list *get_paths(char *filename)
{
char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
fclose(file);
return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = random_gen()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
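/* Pick n paths that advance sequentially through the list: one independent
 * "timeline" per mini-batch slot, each starting at a random index and moving
 * forward by a random stride in [1, augment_speed] every time it is sampled.
 * Used when track != 0 so that consecutive batches emulate video frames. */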
char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed)
{
int speed = rand_int(1, augment_speed);
if (speed < 1) speed = 1;
char** sequential_paths = (char**)calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
//printf("n = %d, mini_batch = %d \n", n, mini_batch);
unsigned int *start_time_indexes = (unsigned int *)calloc(mini_batch, sizeof(unsigned int));
for (i = 0; i < mini_batch; ++i) {
start_time_indexes[i] = random_gen() % m;
//printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]);
}
for (i = 0; i < n; ++i) {
do {
int time_line_index = i % mini_batch;
unsigned int index = start_time_indexes[time_line_index] % m;
start_time_indexes[time_line_index] += speed;
//int index = random_gen() % m;
sequential_paths[i] = paths[index];
//if(i == 0) printf("%s\n", paths[index]);
//printf(" index = %u - grp: %s \n", index, paths[index]);
if (strlen(sequential_paths[i]) <= 4) printf(" Suspiciously short image path: %s \n", sequential_paths[i]);
} while (strlen(sequential_paths[i]) == 0);
}
free(start_time_indexes);
pthread_mutex_unlock(&mutex);
return sequential_paths;
}
char **get_random_paths(char **paths, int n, int m)
{
char** random_paths = (char**)calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
//printf("n = %d \n", n);
for(i = 0; i < n; ++i){
do {
int index = random_gen() % m;
random_paths[i] = paths[index];
//if(i == 0) printf("%s\n", paths[index]);
//printf("grp: %s\n", paths[index]);
if (strlen(random_paths[i]) <= 4) printf(" Suspiciously short image path: %s \n", random_paths[i]);
} while (strlen(random_paths[i]) == 0);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
char** replace_paths = (char**)calloc(n, sizeof(char*));
int i;
for(i = 0; i < n; ++i){
char replaced[4096];
find_replace(paths[i], find, replace, replaced);
replace_paths[i] = copy_string(replaced);
}
return replace_paths;
}
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = (float**)calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image(paths[i], w, h, 3);
image gray = grayscale_image(im);
free_image(im);
im = gray;
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_paths(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = (float**)calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], w, h);
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
int i;
matrix X;
X.rows = n;
X.vals = (float**)calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
int size = w > h ? w : h;
image im = load_image_color(paths[i], 0, 0);
image crop = random_augment_image(im, angle, aspect, min, max, size);
int flip = use_flip ? random_gen() % 2 : 0;
if (flip)
flip_image(crop);
random_distort_image(crop, hue, saturation, exposure);
image sized = resize_image(crop, w, h);
//show_image(im, "orig");
//show_image(sized, "sized");
//show_image(sized, paths[i]);
//wait_until_press_key_cv();
//printf("w = %d, h = %d \n", sized.w, sized.h);
free_image(im);
free_image(crop);
X.vals[i] = sized.data;
X.cols = sized.h*sized.w*sized.c;
}
return X;
}
extern int check_mistakes;
box_label *read_boxes(char *filename, int *n)
{
box_label* boxes = (box_label*)calloc(1, sizeof(box_label));
FILE *file = fopen(filename, "r");
if (!file) {
printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename);
//file_error(filename);
FILE* fw = fopen("bad.list", "a");
fwrite(filename, sizeof(char), strlen(filename), fw);
char *new_line = "\n";
fwrite(new_line, sizeof(char), strlen(new_line), fw);
fclose(fw);
if (check_mistakes) getchar();
*n = 0;
return boxes;
}
float x, y, h, w;
int id;
int count = 0;
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
boxes = (box_label*)realloc(boxes, (count + 1) * sizeof(box_label));
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
void randomize_boxes(box_label *b, int n)
{
int i;
for(i = 0; i < n; ++i){
box_label swap = b[i];
int index = random_gen()%n;
b[i] = b[index];
b[index] = swap;
}
}
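/* Map box labels into the coordinate frame of a cropped (dx, dy, sx, sy) and
 * optionally flipped image. Boxes at the origin or falling outside the patch
 * are marked with the 999999 sentinel and rejected later by the fill_truth_*
 * functions. */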
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
if(boxes[i].x == 0 && boxes[i].y == 0) {
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 ||
(boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1)
{
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
boxes[i].left = boxes[i].left * sx - dx;
boxes[i].right = boxes[i].right * sx - dx;
boxes[i].top = boxes[i].top * sy - dy;
boxes[i].bottom = boxes[i].bottom* sy - dy;
if(flip){
float swap = boxes[i].left;
boxes[i].left = 1. - boxes[i].right;
boxes[i].right = 1. - swap;
}
boxes[i].left = constrain(0, 1, boxes[i].left);
boxes[i].right = constrain(0, 1, boxes[i].right);
boxes[i].top = constrain(0, 1, boxes[i].top);
boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
boxes[i].x = (boxes[i].left+boxes[i].right)/2;
boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
boxes[i].w = (boxes[i].right - boxes[i].left);
boxes[i].h = (boxes[i].bottom - boxes[i].top);
boxes[i].w = constrain(0, 1, boxes[i].w);
boxes[i].h = constrain(0, 1, boxes[i].h);
}
}
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
replace_image_to_label(path, labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count && i < 30; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .0 || h < .0) continue;
int index = (4+classes) * i;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
if (id < classes) truth[index+id] = 1;
}
free(boxes);
}
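/* Encode boxes on a num_boxes x num_boxes grid (region/YOLO-style layout):
 * each cell stores [objectness, class one-hot..., x, y, w, h], with (x, y)
 * measured relative to the cell, and only the first box per cell is kept. */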
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
replace_image_to_label(path, labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .001 || h < .001) continue;
int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);
x = x*num_boxes - col;
y = y*num_boxes - row;
int index = (col+row*num_boxes)*(5+classes);
if (truth[index]) continue;
truth[index++] = 1;
if (id < classes) truth[index+id] = 1;
index += classes;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
}
free(boxes);
}
int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy,
int net_w, int net_h)
{
char labelpath[4096];
replace_image_to_label(path, labelpath);
int count = 0;
int i;
box_label *boxes = read_boxes(labelpath, &count);
int min_w_h = 0;
float lowest_w = 1.F / net_w;
float lowest_h = 1.F / net_h;
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
if (count > num_boxes) count = num_boxes;
float x, y, w, h;
int id;
int sub = 0;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
// not detect small objects
//if ((w < 0.001F || h < 0.001F)) continue;
// if truth (box for object) is smaller than 1x1 pix
char buff[4096 + 512]; // must be large enough to hold labelpath in the echo commands below
if (id >= classes) {
printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d] \n", id, (classes-1));
sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1));
system(buff);
getchar();
++sub;
continue;
}
if ((w < lowest_w || h < lowest_h)) {
//sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath);
//system(buff);
++sub;
continue;
}
if (x == 999999 || y == 999999) {
printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1 \n");
sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath);
system(buff);
++sub;
if (check_mistakes) getchar();
continue;
}
if (x <= 0 || x > 1 || y <= 0 || y > 1) {
printf("\n Wrong annotation: x = %f, y = %f \n", x, y);
sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y);
system(buff);
++sub;
if (check_mistakes) getchar();
continue;
}
if (w > 1) {
printf("\n Wrong annotation: w = %f \n", w);
sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w);
system(buff);
w = 1;
if (check_mistakes) getchar();
}
if (h > 1) {
printf("\n Wrong annotation: h = %f \n", h);
sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h);
system(buff);
h = 1;
if (check_mistakes) getchar();
}
if (x == 0) x += lowest_w;
if (y == 0) y += lowest_h;
truth[(i-sub)*5+0] = x;
truth[(i-sub)*5+1] = y;
truth[(i-sub)*5+2] = w;
truth[(i-sub)*5+3] = h;
truth[(i-sub)*5+4] = id;
if (min_w_h == 0) min_w_h = w*net_w;
if (min_w_h > w*net_w) min_w_h = w*net_w;
if (min_w_h > h*net_h) min_w_h = h*net_h;
}
free(boxes);
return min_w_h;
}
void print_letters(float *pred, int n)
{
int i;
for(i = 0; i < n; ++i){
int index = max_index(pred+i*NUMCHARS, NUMCHARS);
printf("%c", int_to_alphanum(index));
}
printf("\n");
}
void fill_truth_captcha(char *path, int n, float *truth)
{
char *begin = strrchr(path, '/');
++begin;
int i;
for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
int index = alphanum_to_int(begin[i]);
if(index > 35) printf("Bad %c\n", begin[i]);
truth[i*NUMCHARS+index] = 1;
}
for(;i < n; ++i){
truth[i*NUMCHARS + NUMCHARS-1] = 1;
}
}
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = make_matrix(n, k*NUMCHARS);
int i;
for(i = 0; i < n; ++i){
fill_truth_captcha(paths[i], k, d.y.vals[i]);
}
if(m) free(paths);
return d;
}
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.X.cols = 17100;
d.y = d.X;
if(m) free(paths);
return d;
}
void fill_truth(char *path, char **labels, int k, float *truth)
{
int i;
memset(truth, 0, k*sizeof(float));
int count = 0;
for(i = 0; i < k; ++i){
if(strstr(path, labels[i])){
truth[i] = 1;
++count;
}
}
if (count != 1) {
printf("Too many or too few labels: %d, %s\n", count, path);
count = 0;
for (i = 0; i < k; ++i) {
if (strstr(path, labels[i])) {
printf("\t label %d: %s \n", count, labels[i]);
count++;
}
}
}
}
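/* Like fill_truth, but with label smoothing: the matching class gets
 * 1 - label_smooth_eps and every other class gets label_smooth_eps / (k - 1),
 * so the target distribution still sums to 1. */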
void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps)
{
int i;
memset(truth, 0, k * sizeof(float));
int count = 0;
for (i = 0; i < k; ++i) {
if (strstr(path, labels[i])) {
truth[i] = (1 - label_smooth_eps);
++count;
}
else {
truth[i] = label_smooth_eps / (k - 1);
}
}
if (count != 1) {
printf("Too many or too few labels: %d, %s\n", count, path);
count = 0;
for (i = 0; i < k; ++i) {
if (strstr(path, labels[i])) {
printf("\t label %d: %s \n", count, labels[i]);
count++;
}
}
}
}
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
int j;
for(j = 0; j < k; ++j){
if(truth[j]){
int parent = hierarchy->parent[j];
while(parent >= 0){
truth[parent] = 1;
parent = hierarchy->parent[parent];
}
}
}
int i;
int count = 0;
for(j = 0; j < hierarchy->groups; ++j){
//printf("%d\n", count);
int mask = 1;
for(i = 0; i < hierarchy->group_size[j]; ++i){
if(truth[count + i]){
mask = 0;
break;
}
}
if (mask) {
for(i = 0; i < hierarchy->group_size[j]; ++i){
truth[count + i] = SECRET_NUM;
}
}
count += hierarchy->group_size[j];
}
}
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps)
{
matrix y = make_matrix(n, k);
int i;
for(i = 0; i < n && labels; ++i){
fill_truth_smooth(paths[i], labels, k, y.vals[i], label_smooth_eps);
if(hierarchy){
fill_hierarchy(y.vals[i], k, hierarchy);
}
}
return y;
}
matrix load_tags_paths(char **paths, int n, int k)
{
matrix y = make_matrix(n, k);
int i;
int count = 0;
for(i = 0; i < n; ++i){
char label[4096];
find_replace(paths[i], "imgs", "labels", label);
find_replace(label, "_iconl.jpeg", ".txt", label);
FILE *file = fopen(label, "r");
if(!file){
find_replace(label, "labels", "labels2", label);
file = fopen(label, "r");
if(!file) continue;
}
++count;
int tag;
while(fscanf(file, "%d", &tag) == 1){
if(tag < k){
y.vals[i][tag] = 1;
}
}
fclose(file);
}
printf("%d/%d\n", count, n);
return y;
}
char **get_labels_custom(char *filename, int *size)
{
list *plist = get_paths(filename);
if(size) *size = plist->size;
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
char **get_labels(char *filename)
{
return get_labels_custom(filename, NULL);
}
void free_data(data d)
{
if(!d.shallow){
free_matrix(d.X);
free_matrix(d.y);
}else{
free(d.X.vals);
free(d.y.vals);
}
}
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
int dw = (ow*jitter);
int dh = (oh*jitter);
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
int flip = random_gen()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/ow)/sx;
float dy = ((float)ptop /oh)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
}
free(random_paths);
return d;
}
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
if(m) paths = get_random_paths(paths, 2*n, m);
int i,j;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*6;
int k = 2*(classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image im1 = load_image_color(paths[i*2], w, h);
image im2 = load_image_color(paths[i*2+1], w, h);
d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float));
memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
int id;
float iou;
char imlabel1[4096];
char imlabel2[4096];
find_replace(paths[i*2], "imgs", "labels", imlabel1);
find_replace(imlabel1, "jpg", "txt", imlabel1);
FILE *fp1 = fopen(imlabel1, "r");
while(fscanf(fp1, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
}
find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
find_replace(imlabel2, "jpg", "txt", imlabel2);
FILE *fp2 = fopen(imlabel2, "r");
while(fscanf(fp2, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
}
for (j = 0; j < classes; ++j){
if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
d.y.vals[i][2*j] = 1;
d.y.vals[i][2*j+1] = 0;
} else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
d.y.vals[i][2*j] = 0;
d.y.vals[i][2*j+1] = 1;
} else {
d.y.vals[i][2*j] = SECRET_NUM;
d.y.vals[i][2*j+1] = SECRET_NUM;
}
}
fclose(fp1);
fclose(fp2);
free_image(im1);
free_image(im2);
}
if(m) free(paths);
return d;
}
data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = random_gen()%n;
char *random_path = paths[index];
image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
d.X.rows = 1;
d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = (4+classes)*30;
d.y = make_matrix(1, k);
int dw = w*jitter;
int dh = h*jitter;
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = w - pleft - pright;
int sheight = h - ptop - pbot;
float sx = (float)swidth / w;
float sy = (float)sheight / h;
int flip = random_gen()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data;
fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
return d;
}
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
const int t_size = 4 + 1;
int count_new_truth = 0;
int t;
for (t = 0; t < boxes; ++t) {
float x = new_truth[t*(4 + 1)];
if (!x) break;
count_new_truth++;
}
for (t = count_new_truth; t < boxes; ++t) {
float *new_truth_ptr = new_truth + t*t_size;
float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
float x = old_truth_ptr[0];
if (!x) break;
new_truth_ptr[0] = old_truth_ptr[0];
new_truth_ptr[1] = old_truth_ptr[1];
new_truth_ptr[2] = old_truth_ptr[2];
new_truth_ptr[3] = old_truth_ptr[3];
new_truth_ptr[4] = old_truth_ptr[4];
}
//printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}
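/* Mosaic variant of blend_truth: the output canvas is assembled from four
 * source images filling the quadrants defined by (cut_x, cut_y). Boxes from
 * source i_mixup are shifted into their quadrant, clipped at the canvas
 * border, and kept only if they still lie fully inside the image. */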
void blend_truth_mosaic(float *new_truth, int boxes, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup,
int left_shift, int right_shift, int top_shift, int bot_shift)
{
const int t_size = 4 + 1;
int count_new_truth = 0;
int t;
for (t = 0; t < boxes; ++t) {
float x = new_truth[t*(4 + 1)];
if (!x) break;
count_new_truth++;
}
int new_t = count_new_truth;
for (t = count_new_truth; t < boxes; ++t) {
float *new_truth_ptr = new_truth + new_t*t_size;
new_truth_ptr[0] = 0;
float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
float x = old_truth_ptr[0];
if (!x) break;
float xb = old_truth_ptr[0];
float yb = old_truth_ptr[1];
float wb = old_truth_ptr[2];
float hb = old_truth_ptr[3];
// shift 4 images
if (i_mixup == 0) {
xb = xb - (float)(w - cut_x - right_shift) / w;
yb = yb - (float)(h - cut_y - bot_shift) / h;
}
if (i_mixup == 1) {
xb = xb + (float)(cut_x - left_shift) / w;
yb = yb - (float)(h - cut_y - bot_shift) / h;
}
if (i_mixup == 2) {
xb = xb - (float)(w - cut_x - right_shift) / w;
yb = yb + (float)(cut_y - top_shift) / h;
}
if (i_mixup == 3) {
xb = xb + (float)(cut_x - left_shift) / w;
yb = yb + (float)(cut_y - top_shift) / h;
}
int left = (xb - wb / 2)*w;
int right = (xb + wb / 2)*w;
int top = (yb - hb / 2)*h;
int bot = (yb + hb / 2)*h;
// fix out of bound
if (left < 0) {
float diff = (float)left / w;
xb = xb - diff / 2;
wb = wb + diff;
}
if (right > w) {
float diff = (float)(right - w) / w;
xb = xb - diff / 2;
wb = wb - diff;
}
if (top < 0) {
float diff = (float)top / h;
yb = yb - diff / 2;
hb = hb + diff;
}
if (bot > h) {
float diff = (float)(bot - h) / h;
yb = yb - diff / 2;
hb = hb - diff;
}
left = (xb - wb / 2)*w;
right = (xb + wb / 2)*w;
top = (yb - hb / 2)*h;
bot = (yb + hb / 2)*h;
// leave only within the image
if(left >= 0 && right <= w && top >= 0 && bot <= h &&
wb > 0 && wb < 1 && hb > 0 && hb < 1 &&
xb > 0 && xb < 1 && yb > 0 && yb < 1)
{
new_truth_ptr[0] = xb;
new_truth_ptr[1] = yb;
new_truth_ptr[2] = wb;
new_truth_ptr[3] = hb;
new_truth_ptr[4] = old_truth_ptr[4];
new_t++;
}
}
//printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}
#ifdef OPENCV
#include "http_stream.h"
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup,
float jitter, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
const int random_index = random_gen();
c = c ? c : 3;
assert(use_mixup != 2);
if (random_gen() % 2 == 0) use_mixup = 0;
int i;
int *cut_x = NULL, *cut_y = NULL;
if (use_mixup == 3) {
cut_x = (int*)calloc(n, sizeof(int));
cut_y = (int*)calloc(n, sizeof(int));
const float min_offset = 0.2; // 20%
for (i = 0; i < n; ++i) {
cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset));
cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset));
}
}
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*c;
float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0;
int augmentation_calculated = 0;
d.y = make_matrix(n, 5*boxes);
int i_mixup = 0;
for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) {
if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1)
char **random_paths;
if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
else random_paths = get_random_paths(paths, n, m);
for (i = 0; i < n; ++i) {
float *truth = (float*)calloc(5 * boxes, sizeof(float));
const char *filename = random_paths[i];
int flag = (c >= 3);
mat_cv *src;
src = load_image_mat_cv(filename, flag);
if (src == NULL) {
if (check_mistakes) getchar();
continue;
}
int oh = get_height_mat(src);
int ow = get_width_mat(src);
int dw = (ow*jitter);
int dh = (oh*jitter);
if (!augmentation_calculated || !track)
{
augmentation_calculated = 1;
r1 = random_float();
r2 = random_float();
r3 = random_float();
r4 = random_float();
r_scale = random_float();
dhue = rand_uniform_strong(-hue, hue);
dsat = rand_scale(saturation);
dexp = rand_scale(exposure);
flip = use_flip ? random_gen() % 2 : 0;
if (use_blur) {
int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image
if (tmp_blur == 0) blur = 0;
else if (tmp_blur == 1) blur = 1;
else blur = use_blur;
}
}
int pleft = rand_precalc_random(-dw, dw, r1);
int pright = rand_precalc_random(-dw, dw, r2);
int ptop = rand_precalc_random(-dh, dh, r3);
int pbot = rand_precalc_random(-dh, dh, r4);
//printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh);
float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
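// letter_box: instead of stretching the image to the network aspect ratio,
// enlarge the crop along one axis so the crop itself already matches it.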
if (letter_box)
{
float img_ar = (float)ow / (float)oh;
float net_ar = (float)w / (float)h;
float result_ar = img_ar / net_ar;
//printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
if (result_ar > 1) // sheight - should be increased
{
float oh_tmp = ow / net_ar;
float delta_h = (oh_tmp - oh)/2;
ptop = ptop - delta_h;
pbot = pbot - delta_h;
//printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
}
else // swidth - should be increased
{
float ow_tmp = oh * net_ar;
float delta_w = (ow_tmp - ow)/2;
pleft = pleft - delta_w;
pright = pright - delta_w;
//printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
}
}
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
float dx = ((float)pleft / ow) / sx;
float dy = ((float)ptop / oh) / sy;
int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small
image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp,
blur, boxes, truth);
if (use_mixup == 0) {
d.X.vals[i] = ai.data;
memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
}
else if (use_mixup == 1) {
if (i_mixup == 0) {
d.X.vals[i] = ai.data;
memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
}
else if (i_mixup == 1) {
image old_img = make_empty_image(w, h, c);
old_img.data = d.X.vals[i];
//show_image(ai, "new");
//show_image(old_img, "old");
//wait_until_press_key_cv();
blend_images_cv(ai, 0.5, old_img, 0.5);
blend_truth(d.y.vals[i], boxes, truth);
free_image(old_img);
d.X.vals[i] = ai.data;
}
}
else if (use_mixup == 3) {
if (i_mixup == 0) {
image tmp_img = make_image(w, h, c);
d.X.vals[i] = tmp_img.data;
}
if (flip) {
int tmp = pleft;
pleft = pright;
pright = tmp;
}
const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow)));
const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh)));
const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow)));
const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh)));
int k, x, y;
for (k = 0; k < c; ++k) {
for (y = 0; y < h; ++y) {
int j = y*w + k*w*h;
if (i_mixup == 0 && y < cut_y[i]) {
int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
}
if (i_mixup == 1 && y < cut_y[i]) {
int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h;
memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float));
}
if (i_mixup == 2 && y >= cut_y[i]) {
int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h;
memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float));
}
if (i_mixup == 3 && y >= cut_y[i]) {
int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h;
memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float));
}
}
}
blend_truth_mosaic(d.y.vals[i], boxes, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift);
free_image(ai);
ai.data = d.X.vals[i];
}
if (show_imgs && i_mixup == use_mixup) // delete i_mixup
{
image tmp_ai = copy_image(ai);
char buff[1000];
//sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen());
int t;
for (t = 0; t < boxes; ++t) {
box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
if (!b.x) break;
int left = (b.x - b.w / 2.)*ai.w;
int right = (b.x + b.w / 2.)*ai.w;
int top = (b.y - b.h / 2.)*ai.h;
int bot = (b.y + b.h / 2.)*ai.h;
draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
}
save_image(tmp_ai, buff);
if (show_imgs == 1) {
//char buff_src[1000];
//sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen());
//show_image_mat(src, buff_src);
show_image(tmp_ai, buff);
wait_until_press_key_cv();
}
printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
free_image(tmp_ai);
}
release_mat(&src);
free(truth);
}
if (random_paths) free(random_paths);
}
return d;
}
#else // OPENCV
void blend_images(image new_img, float alpha, image old_img, float beta)
{
int i;
int data_size = new_img.w * new_img.h * new_img.c;
#pragma omp parallel for
for (i = 0; i < data_size; ++i)
new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta;
}
data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_blur, int use_mixup, float jitter,
float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
const int random_index = random_gen();
c = c ? c : 3;
char **random_paths;
char **mixup_random_paths = NULL;
if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
else random_paths = get_random_paths(paths, n, m);
assert(use_mixup < 2);
int mixup = use_mixup ? random_gen() % 2 : 0;
//printf("\n mixup = %d \n", mixup);
if (mixup) {
if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed);
else mixup_random_paths = get_random_paths(paths, n, m);
}
int i;
data d = { 0 };
d.shallow = 0;
d.X.rows = n;
d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*c;
float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0;
float dhue = 0, dsat = 0, dexp = 0, flip = 0;
int augmentation_calculated = 0;
d.y = make_matrix(n, 5 * boxes);
int i_mixup = 0;
for (i_mixup = 0; i_mixup <= mixup; i_mixup++) {
if (i_mixup) augmentation_calculated = 0;
for (i = 0; i < n; ++i) {
float *truth = (float*)calloc(5 * boxes, sizeof(float));
char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i];
image orig = load_image(filename, 0, 0, c);
int oh = orig.h;
int ow = orig.w;
int dw = (ow*jitter);
int dh = (oh*jitter);
if (!augmentation_calculated || !track)
{
augmentation_calculated = 1;
r1 = random_float();
r2 = random_float();
r3 = random_float();
r4 = random_float();
r_scale = random_float();
dhue = rand_uniform_strong(-hue, hue);
dsat = rand_scale(saturation);
dexp = rand_scale(exposure);
flip = use_flip ? random_gen() % 2 : 0;
}
int pleft = rand_precalc_random(-dw, dw, r1);
int pright = rand_precalc_random(-dw, dw, r2);
int ptop = rand_precalc_random(-dh, dh, r3);
int pbot = rand_precalc_random(-dh, dh, r4);
float scale = rand_precalc_random(.25, 2, r_scale); // unused currently
if (letter_box)
{
float img_ar = (float)ow / (float)oh;
float net_ar = (float)w / (float)h;
float result_ar = img_ar / net_ar;
//printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
if (result_ar > 1) // sheight - should be increased
{
float oh_tmp = ow / net_ar;
float delta_h = (oh_tmp - oh) / 2;
ptop = ptop - delta_h;
pbot = pbot - delta_h;
//printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
}
else // swidth - should be increased
{
float ow_tmp = oh * net_ar;
float delta_w = (ow_tmp - ow) / 2;
pleft = pleft - delta_w;
pright = pright - delta_w;
//printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
}
}
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft / ow) / sx;
float dy = ((float)ptop / oh) / sy;
image sized = resize_image(cropped, w, h);
if (flip) flip_image(sized);
distort_image(sized, dhue, dsat, dexp);
//random_distort_image(sized, hue, saturation, exposure);
fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h);
if (i_mixup) {
image old_img = sized;
old_img.data = d.X.vals[i];
//show_image(sized, "new");
//show_image(old_img, "old");
//wait_until_press_key_cv();
blend_images(sized, 0.5, old_img, 0.5);
blend_truth(truth, boxes, d.y.vals[i]);
free_image(old_img);
}
d.X.vals[i] = sized.data;
memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float));
if (show_imgs)// && i_mixup)
{
char buff[1000];
sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen());
int t;
for (t = 0; t < boxes; ++t) {
box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1);
if (!b.x) break;
int left = (b.x - b.w / 2.)*sized.w;
int right = (b.x + b.w / 2.)*sized.w;
int top = (b.y - b.h / 2.)*sized.h;
int bot = (b.y + b.h / 2.)*sized.h;
draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB
}
save_image(sized, buff);
if (show_imgs == 1) {
show_image(sized, buff);
wait_until_press_key_cv();
}
printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Press Enter: \n");
//getchar();
}
free_image(orig);
free_image(cropped);
free(truth);
}
}
free(random_paths);
if (mixup_random_paths) free(mixup_random_paths);
return d;
}
#endif // OPENCV
void *load_thread(void *ptr)
{
//srand(time(0));
//printf("Loading data: %d\n", random_gen());
load_args a = *(struct load_args*)ptr;
if(a.exposure == 0) a.exposure = 1;
if(a.saturation == 0) a.saturation = 1;
if(a.aspect == 0) a.aspect = 1;
if (a.type == OLD_CLASSIFICATION_DATA){
*a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
} else if (a.type == CLASSIFICATION_DATA){
*a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps);
} else if (a.type == SUPER_DATA){
*a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
} else if (a.type == WRITING_DATA){
*a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
} else if (a.type == REGION_DATA){
*a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == DETECTION_DATA){
*a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.blur, a.mixup, a.jitter,
a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs);
} else if (a.type == SWAG_DATA){
*a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
} else if (a.type == COMPARE_DATA){
*a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
} else if (a.type == IMAGE_DATA){
*(a.im) = load_image(a.path, 0, 0, a.c);
*(a.resized) = resize_image(*(a.im), a.w, a.h);
}else if (a.type == LETTERBOX_DATA) {
*(a.im) = load_image(a.path, 0, 0, a.c);
*(a.resized) = letterbox_image(*(a.im), a.w, a.h);
} else if (a.type == TAG_DATA){
*a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
}
free(ptr);
return 0;
}
pthread_t load_data_in_thread(load_args args)
{
pthread_t thread;
struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
return thread;
}
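/* Split one load request across args.threads workers: thread i loads items
 * [i*total/threads, (i+1)*total/threads), the per-thread results are
 * concatenated into *out, and the shallow buffers are freed without touching
 * the rows now owned by the concatenated data. */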
void *load_threads(void *ptr)
{
//srand(time(0));
int i;
load_args args = *(load_args *)ptr;
if (args.threads == 0) args.threads = 1;
data *out = args.d;
int total = args.n;
free(ptr);
data* buffers = (data*)calloc(args.threads, sizeof(data));
pthread_t* threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t));
for(i = 0; i < args.threads; ++i){
args.d = buffers + i;
args.n = (i+1) * total/args.threads - i * total/args.threads;
threads[i] = load_data_in_thread(args);
}
for(i = 0; i < args.threads; ++i){
pthread_join(threads[i], 0);
}
*out = concat_datas(buffers, args.threads);
out->shallow = 0;
for(i = 0; i < args.threads; ++i){
buffers[i].shallow = 1;
free_data(buffers[i]);
}
free(buffers);
free(threads);
return 0;
}
pthread_t load_data(load_args args)
{
pthread_t thread;
struct load_args* ptr = (load_args*)calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
return thread;
}
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
if(m) paths = get_random_paths(paths, n, m);
char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
if(m) free(paths);
int i;
for(i = 0; i < n; ++i) free(replace_paths[i]);
free(replace_paths);
return d;
}
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_labels_paths(paths, n, labels, k, 0, 0);
if(m) free(paths);
return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
int i;
d.X.rows = n;
d.X.vals = (float**)calloc(n, sizeof(float*));
d.X.cols = w*h*3;
d.y.rows = n;
d.y.vals = (float**)calloc(n, sizeof(float*));
d.y.cols = w*scale * h*scale * 3;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop = random_crop_image(im, w*scale, h*scale);
int flip = random_gen()%2;
if (flip) flip_image(crop);
image resize = resize_image(crop, w, h);
d.X.vals[i] = resize.data;
d.y.vals[i] = crop.data;
free_image(im);
}
if(m) free(paths);
return d;
}
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int mixup, int use_blur, int show_imgs, float label_smooth_eps)
{
char **paths_stored = paths;
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps);
if (mixup && rand_int(0, 1)) {
char **paths_mix = get_random_paths(paths_stored, n, m);
data d2 = { 0 };
d2.shallow = 0;
d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure);
d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps);
free(paths_mix);
data d3 = { 0 };
d3.shallow = 0;
data d4 = { 0 };
d4.shallow = 0;
if (mixup >= 3) {
char **paths_mix3 = get_random_paths(paths_stored, n, m);
d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure);
d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps);
free(paths_mix3);
char **paths_mix4 = get_random_paths(paths_stored, n, m);
d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure);
d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps);
free(paths_mix4);
}
// mix
int i, j;
for (i = 0; i < d2.X.rows; ++i) {
if (mixup == 4) mixup = rand_int(2, 3); // randomly pick between CutMix (2) and Mosaic (3)
// MixUp -----------------------------------
if (mixup == 1) {
// mix images
for (j = 0; j < d2.X.cols; ++j) {
d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f;
}
// mix labels
for (j = 0; j < d2.y.cols; ++j) {
d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f;
}
}
// CutMix -----------------------------------
else if (mixup == 2) {
const float min = 0.3; // 0.3*0.3 = 9%
const float max = 0.8; // 0.8*0.8 = 64%
const int cut_w = rand_int(w*min, w*max);
const int cut_h = rand_int(h*min, h*max);
const int cut_x = rand_int(0, w - cut_w - 1);
const int cut_y = rand_int(0, h - cut_h - 1);
const int left = cut_x;
const int right = cut_x + cut_w;
const int top = cut_y;
const int bot = cut_y + cut_h;
assert(cut_x >= 0 && cut_x <= w);
assert(cut_y >= 0 && cut_y <= h);
assert(cut_w >= 0 && cut_w <= w);
assert(cut_h >= 0 && cut_h <= h);
assert(right >= 0 && right <= w);
assert(bot >= 0 && bot <= h);
assert(top <= bot);
assert(left <= right);
const float alpha = (float)(cut_w*cut_h) / (float)(w*h);
const float beta = 1 - alpha;
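// labels are mixed below in proportion to area: beta weights the base image, alpha the pasted patch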
int c, x, y;
for (c = 0; c < 3; ++c) {
for (y = top; y < bot; ++y) {
for (x = left; x < right; ++x) {
int j = x + y*w + c*w*h;
d.X.vals[i][j] = d2.X.vals[i][j];
}
}
}
//printf("\n alpha = %f, beta = %f \n", alpha, beta);
// mix labels
for (j = 0; j < d.y.cols; ++j) {
d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha;
}
}
// Mosaic -----------------------------------
else if (mixup == 3)
{
const float min_offset = 0.2; // 20%
const int cut_x = rand_int(w*min_offset, w*(1 - min_offset));
const int cut_y = rand_int(h*min_offset, h*(1 - min_offset));
float s1 = (float)(cut_x * cut_y) / (w*h);
float s2 = (float)((w - cut_x) * cut_y) / (w*h);
float s3 = (float)(cut_x * (h - cut_y)) / (w*h);
float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h);
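// s1..s4 are the area fractions of the four mosaic quadrants and weight the label mix below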
int c, x, y;
for (c = 0; c < 3; ++c) {
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
int j = x + y*w + c*w*h;
if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j];
if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j];
if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j];
if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j];
}
}
}
for (j = 0; j < d.y.cols; ++j) {
d.y.vals[i][j] = d.y.vals[i][j] * s1 + d2.y.vals[i][j] * s2 + d3.y.vals[i][j] * s3 + d4.y.vals[i][j] * s4;
}
}
}
free_data(d2);
if (mixup == 3) {
free_data(d3);
free_data(d4);
}
}
#ifdef OPENCV
if (use_blur) {
int i;
for (i = 0; i < d.X.rows; ++i) {
if (random_gen() % 2) {
image im = make_empty_image(w, h, 3);
im.data = d.X.vals[i];
int ksize = use_blur;
if (use_blur == 1) ksize = 17;
image blurred = blur_image(im, ksize);
free_image(im);
d.X.vals[i] = blurred.data;
//if (i == 0) {
// show_image(im, "Not blurred");
// show_image(blurred, "blurred");
// wait_until_press_key_cv();
//}
}
}
}
#endif // OPENCV
if (show_imgs) {
int i, j;
for (i = 0; i < d.X.rows; ++i) {
image im = make_empty_image(w, h, 3);
im.data = d.X.vals[i];
char buff[1000];
sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen());
save_image(im, buff);
char buff_string[1000];
sprintf(buff_string, "\n Classes: ");
for (j = 0; j < d.y.cols; ++j) {
if (d.y.vals[i][j] > 0) {
char buff_tmp[100];
sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]);
strcat(buff_string, buff_tmp);
}
}
printf("%s \n", buff_string);
if (show_imgs == 1) {
show_image(im, buff);
wait_until_press_key_cv();
}
}
printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
}
if (m) free(paths);
return d;
}
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.w = w;
d.h = h;
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure);
d.y = load_tags_paths(paths, n, k);
if(m) free(paths);
return d;
}
matrix concat_matrix(matrix m1, matrix m2)
{
int i, count = 0;
matrix m;
m.cols = m1.cols;
m.rows = m1.rows+m2.rows;
m.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*));
for(i = 0; i < m1.rows; ++i){
m.vals[count++] = m1.vals[i];
}
for(i = 0; i < m2.rows; ++i){
m.vals[count++] = m2.vals[i];
}
return m;
}
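/* Note: concatenation below is shallow -- the result aliases the row pointers of
d1 and d2 rather than copying them, which is why concat_data() sets shallow = 1. */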
data concat_data(data d1, data d2)
{
data d = {0};
d.shallow = 1;
d.X = concat_matrix(d1.X, d2.X);
d.y = concat_matrix(d1.y, d2.y);
return d;
}
data concat_datas(data *d, int n)
{
int i;
data out = {0};
for(i = 0; i < n; ++i){
data newdata = concat_data(d[i], out);
free_data(out);
out = newdata;
}
return out;
}
data load_categorical_data_csv(char *filename, int target, int k)
{
data d = {0};
d.shallow = 0;
matrix X = csv_to_matrix(filename);
float *truth_1d = pop_column(&X, target);
float **truth = one_hot_encode(truth_1d, X.rows, k);
matrix y;
y.rows = X.rows;
y.cols = k;
y.vals = truth;
d.X = X;
d.y = y;
free(truth_1d);
return d;
}
data load_cifar10_data(char *filename)
{
data d = {0};
d.shallow = 0;
long i,j;
matrix X = make_matrix(10000, 3072);
matrix y = make_matrix(10000, 10);
d.X = X;
d.y = y;
FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
if (fread(bytes, 1, 3073, fp) != 3073) file_error(filename);
int class_id = bytes[0];
y.vals[i][class_id] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i][j] = (double)bytes[j+1];
}
}
//translate_data_rows(d, -128);
scale_data_rows(d, 1./255);
//normalize_data_rows(d);
fclose(fp);
return d;
}
void get_random_batch(data d, int n, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = random_gen()%d.X.rows;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = offset + j;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void smooth_data(data d)
{
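// label smoothing: y := eps/k + (1 - eps)*y, pulling one-hot targets toward the uniform distribution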
int i, j;
float scale = 1. / d.y.cols;
float eps = .1;
for(i = 0; i < d.y.rows; ++i){
for(j = 0; j < d.y.cols; ++j){
d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j];
}
}
}
data load_all_cifar10()
{
data d = {0};
d.shallow = 0;
int i,j,b;
matrix X = make_matrix(50000, 3072);
matrix y = make_matrix(50000, 10);
d.X = X;
d.y = y;
for(b = 0; b < 5; ++b){
char buff[256];
sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
FILE *fp = fopen(buff, "rb");
if(!fp) file_error(buff);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
if (fread(bytes, 1, 3073, fp) != 3073) file_error(buff);
int class_id = bytes[0];
y.vals[i+b*10000][class_id] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i+b*10000][j] = (double)bytes[j+1];
}
}
fclose(fp);
}
//normalize_data_rows(d);
//translate_data_rows(d, -128);
scale_data_rows(d, 1./255);
smooth_data(d);
return d;
}
data load_go(char *filename)
{
FILE *fp = fopen(filename, "rb");
matrix X = make_matrix(3363059, 361);
matrix y = make_matrix(3363059, 361);
int row, col;
if(!fp) file_error(filename);
char *label;
int count = 0;
while((label = fgetl(fp))){
int i;
if(count == X.rows){
X = resize_matrix(X, count*2);
y = resize_matrix(y, count*2);
}
sscanf(label, "%d %d", &row, &col);
char *board = fgetl(fp);
int index = row*19 + col;
y.vals[count][index] = 1;
for(i = 0; i < 19*19; ++i){
float val = 0;
if(board[i] == '1') val = 1;
else if(board[i] == '2') val = -1;
X.vals[count][i] = val;
}
++count;
free(label);
free(board);
}
X = resize_matrix(X, count);
y = resize_matrix(y, count);
data d = {0};
d.shallow = 0;
d.X = X;
d.y = y;
fclose(fp);
return d;
}
void randomize_data(data d)
{
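// in-place shuffle of the rows; X and y are swapped together so example/label pairs stay aligned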
int i;
for(i = d.X.rows-1; i > 0; --i){
int index = random_gen()%i;
float *swap = d.X.vals[index];
d.X.vals[index] = d.X.vals[i];
d.X.vals[i] = swap;
swap = d.y.vals[index];
d.y.vals[index] = d.y.vals[i];
d.y.vals[i] = swap;
}
}
void scale_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
scale_array(d.X.vals[i], d.X.cols, s);
}
}
void translate_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
translate_array(d.X.vals[i], d.X.cols, s);
}
}
void normalize_data_rows(data d)
{
int i;
for(i = 0; i < d.X.rows; ++i){
normalize_array(d.X.vals[i], d.X.cols);
}
}
data get_data_part(data d, int part, int total)
{
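// returns a shallow view of rows [rows*part/total, rows*(part+1)/total); no data is copied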
data p = {0};
p.shallow = 1;
p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
p.X.cols = d.X.cols;
p.y.cols = d.y.cols;
p.X.vals = d.X.vals + d.X.rows * part / total;
p.y.vals = d.y.vals + d.y.rows * part / total;
return p;
}
data get_random_data(data d, int num)
{
data r = {0};
r.shallow = 1;
r.X.rows = num;
r.y.rows = num;
r.X.cols = d.X.cols;
r.y.cols = d.y.cols;
r.X.vals = (float**)calloc(num, sizeof(float*));
r.y.vals = (float**)calloc(num, sizeof(float*));
int i;
for(i = 0; i < num; ++i){
int index = random_gen()%d.X.rows;
r.X.vals[i] = d.X.vals[index];
r.y.vals[i] = d.y.vals[index];
}
return r;
}
data *split_data(data d, int part, int total)
{
data* split = (data*)calloc(2, sizeof(data));
int i;
int start = part*d.X.rows/total;
int end = (part+1)*d.X.rows/total;
data train = {0};
data test = {0};
train.shallow = test.shallow = 1;
test.X.rows = test.y.rows = end-start;
train.X.rows = train.y.rows = d.X.rows - (end-start);
train.X.cols = test.X.cols = d.X.cols;
train.y.cols = test.y.cols = d.y.cols;
train.X.vals = (float**)calloc(train.X.rows, sizeof(float*));
test.X.vals = (float**)calloc(test.X.rows, sizeof(float*));
train.y.vals = (float**)calloc(train.y.rows, sizeof(float*));
test.y.vals = (float**)calloc(test.y.rows, sizeof(float*));
for(i = 0; i < start; ++i){
train.X.vals[i] = d.X.vals[i];
train.y.vals[i] = d.y.vals[i];
}
for(i = start; i < end; ++i){
test.X.vals[i-start] = d.X.vals[i];
test.y.vals[i-start] = d.y.vals[i];
}
for(i = end; i < d.X.rows; ++i){
train.X.vals[i-(end-start)] = d.X.vals[i];
train.y.vals[i-(end-start)] = d.y.vals[i];
}
split[0] = train;
split[1] = test;
return split;
}
|
GB_unaryop__identity_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_bool
// op(A') function: GB_tran__identity_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_bool_bool
(
bool *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sieve.c | /*
Tempo - Sequencial
5761455
real 0m3.999s
user 0m3.918s
sys 0m0.068s
5761455
real 0m4.013s
user 0m3.918s
sys 0m0.076s
5761455
real 0m4.179s
user 0m4.061s
sys 0m0.091s
5761455
real 0m3.981s
user 0m3.891s
sys 0m0.076s
5761455
real 0m4.311s
user 0m4.212s
sys 0m0.088s
5761455
real 0m4.318s
user 0m4.114s
sys 0m0.179s
5761455
real 0m4.049s
user 0m3.904s
sys 0m0.120s
5761455
real 0m4.454s
user 0m4.004s
sys 0m0.440s
Tempo - Paralelo
5761455
real 0m2.281s
user 0m8.659s
sys 0m0.089s
5761455
real 0m2.563s
user 0m9.498s
sys 0m0.295s
5761455
real 0m2.504s
user 0m9.418s
sys 0m0.231s
5761455
real 0m2.460s
user 0m9.175s
sys 0m0.275s
5761455
real 0m2.540s
user 0m9.627s
sys 0m0.184s
5761455
real 0m2.528s
user 0m9.669s
sys 0m0.102s
5761455
real 0m2.341s
user 0m8.716s
sys 0m0.227s
5761455
real 0m2.278s
user 0m8.696s
sys 0m0.072s
*/
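/* Rough arithmetic on the runs above: mean real time is about 4.16 s for the
sequential version and about 2.44 s for the parallel one, i.e. a speedup of
roughly 1.7x on this machine. */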
/*
* Adapted from: http://w...content-available-to-author-only...s.org/sieve-of-eratosthenes
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
int sieveOfEratosthenes(int n)
{
// Create a boolean array "prime[0..n]" and initialize
// all entries in it as true. A value in prime[i] will
// finally be false if i is not a prime, else true.
int primes = 0;
bool *prime = (bool*) malloc((n+1)*sizeof(bool));
int sqrt_n = sqrt(n);
memset(prime, true,(n+1)*sizeof(bool));
int i, p;
#pragma omp parallel for schedule(dynamic)
for (p=2; p <= sqrt_n; p++)
{
// If prime[p] is not changed, then it is a prime
if (prime[p] == true)
{
// Update all multiples of p
#pragma omp parallel for schedule(static)
for(i=p*2; i<=n; i += p)
prime[i] = false;
}
}
// count prime numbers
#pragma omp parallel for reduction(+:primes)
for (int p=2; p<=n; p++)
if (prime[p])
primes++;
free(prime);
return(primes);
}
int main()
{
int n = 100000000;
printf("%d\n",sieveOfEratosthenes(n));
return 0;
}
|
ars_vectorized_environment.h | #ifndef ARS_VECTORIZED_ENVIRONMENT_H
#define ARS_VECTORIZED_ENVIRONMENT_H
#include <algorithm>
#include <thread>
#include <functional>
#include <vector>
/// @param[in] nb_elements : size of your for loop
/// @param[in] functor(start, end) :
/// your function processing a sub chunk of the for loop.
/// "start" is the first index to process (included) until the index "end"
/// (excluded)
/// @code
/// for(int i = start; i < end; ++i)
/// computation(i);
/// @endcode
/// @param use_threads : enable / disable threads.
///
///
static
void parallel_for(unsigned nb_elements,
std::function<void (int start, int end)> functor,
bool use_threads = true)
{
// -------
unsigned nb_threads_hint = 20;//std::thread::hardware_concurrency();
unsigned nb_threads = nb_threads_hint == 0 ? 8 : (nb_threads_hint);
unsigned batch_size = nb_elements / nb_threads;
unsigned batch_remainder = nb_elements % nb_threads;
std::vector< std::thread > my_threads(nb_threads);
if( use_threads )
{
// Multithread execution
for(unsigned i = 0; i < nb_threads; ++i)
{
int start = i * batch_size;
my_threads[i] = std::thread(functor, start, start+batch_size);
}
}
else
{
// Single thread execution (for easy debugging)
for(unsigned i = 0; i < nb_threads; ++i){
int start = i * batch_size;
functor( start, start+batch_size );
}
}
// Process the elements left over when nb_elements is not a multiple of nb_threads
int start = nb_threads * batch_size;
functor( start, start+batch_remainder);
// Wait for the worker threads to finish their tasks
if( use_threads )
std::for_each(my_threads.begin(), my_threads.end(), std::mem_fn(&std::thread::join));
}
#define PARALLEL_FOR_BEGIN(nb_elements) parallel_for(nb_elements, [&](int start, int end){ for(int i = start; i < end; ++i)
#define PARALLEL_FOR_END()})
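// Illustrative use of the macros above (`src`, `dst`, and `n` are hypothetical):
//
// PARALLEL_FOR_BEGIN(n) {
// dst[i] = src[i] * src[i];
// } PARALLEL_FOR_END();
//
// which expands to a parallel_for() call whose lambda squares each element.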
template <typename Algebra, typename AbstractSimulation>
struct VectorizedEnvironment
{
AbstractSimulation& contact_sim;
using Scalar = typename Algebra::Scalar;
using Vector3 = typename Algebra::Vector3;
using Transform = typename Algebra::Transform;
std::vector<std::vector<Scalar>> sim_states_;
std::vector<std::vector<Scalar>> sim_states_with_action_and_variables;
std::vector<std::vector<Scalar>> sim_states_with_graphics_;
std::vector<tds::NeuralNetwork<Algebra> > neural_networks_;
int observation_dim_{0};
VectorizedEnvironment(AbstractSimulation& sim)
:contact_sim(sim)
{
neural_networks_.resize(g_num_total_threads);
sim_states_.resize(g_num_total_threads);
sim_states_with_action_and_variables.resize(g_num_total_threads);
sim_states_with_graphics_.resize(g_num_total_threads);
observation_dim_ = contact_sim.input_dim();
bool use_input_bias = false;
for (int index=0;index<g_num_total_threads;index++)
{
neural_networks_[index].set_input_dim(observation_dim_, use_input_bias);
//network.add_linear_layer(tds::NN_ACT_RELU, 32);
//neural_network.add_linear_layer(tds::NN_ACT_RELU, 64);
bool learn_bias = true;
neural_networks_[index].add_linear_layer(tds::NN_ACT_IDENTITY, sim.action_dim(),learn_bias);
}
}
virtual ~VectorizedEnvironment()
{
}
void init_neural_network(int index, const std::vector<double> &x)
{
neural_networks_[index].set_parameters(x);
}
void seed(long long int s) {
//std::cout<<"seed:" << s << std::endl;
std::srand(s);
}
std::vector< std::vector<double> > reset()
{
for (int index=0;index<g_num_total_threads;index++)
{
sim_states_[index].resize(0);
sim_states_[index].resize(contact_sim.input_dim(), Scalar(0));
Vector3 start_pos(0,0,.48);//0.4002847
typename Algebra::Quaternion start_orn (0,0,0,1); // use the struct's own Algebra aliases instead of the global MyAlgebra
if (contact_sim.mb_->is_floating())
{
sim_states_[index][0] = start_orn.x();
sim_states_[index][1] = start_orn.y();
sim_states_[index][2] = start_orn.z();
sim_states_[index][3] = start_orn.w();
sim_states_[index][4] = start_pos.x();
sim_states_[index][5] = start_pos.y();
sim_states_[index][6] = start_pos.z();
int qoffset = 7;
for(int j=0;j<get_initial_poses<Scalar>().size();j++)
{
sim_states_[index][j+qoffset] = get_initial_poses<Scalar>()[j]+0.05*((std::rand() * 1. / RAND_MAX)-0.5)*2.0;
}
}
else
{
sim_states_[index][0] = start_pos.x();
sim_states_[index][1] = start_pos.y();
sim_states_[index][2] = start_pos.z();
sim_states_[index][3] = 0;
sim_states_[index][4] = 0;
sim_states_[index][5] = 0;
int qoffset = 6;
for(int j=0;j<get_initial_poses<Scalar>().size();j++)
{
sim_states_[index][j+qoffset] = get_initial_poses<Scalar>()[j]+0.05*((std::rand() * 1. / RAND_MAX)-0.5)*2.0;
}
}
}
std::vector< std::vector<double>> zero_actions(g_num_total_threads);
for (int i=0;i<g_num_total_threads;i++)
{
zero_actions[i].resize(get_initial_poses<Scalar>().size(), Scalar(0));
}
std::vector< std::vector<double>> observations;
observations.resize(g_num_total_threads);
std::vector<double> rewards;
rewards.resize(g_num_total_threads);
std::vector<bool> dones;
dones.resize(g_num_total_threads);
//todo: tune this
int settle_down_steps= 50;
for (int i=0;i<settle_down_steps;i++)
{
step(zero_actions, observations, rewards, dones);
}
//for (auto v : sim_state)
// std::cout << v << std::endl;
return observations;
}
void step( std::vector< std::vector<double>>& actions,std::vector<std::vector<double>>& observations,
std::vector<double>& rewards,std::vector<bool>& dones)
{
std::vector<std::vector<Scalar>> outputs(
g_num_total_threads, std::vector<Scalar>(contact_sim.output_dim()));
std::vector<std::vector<Scalar>> inputs(g_num_total_threads);
for (int index=0;index<g_num_total_threads;index++)
{
int simstate_size = sim_states_[index].size();
sim_states_with_action_and_variables[index] = sim_states_[index];
sim_states_with_action_and_variables[index].resize(contact_sim.input_dim_with_action_and_variables());
contact_sim.prepare_sim_state_with_action_and_variables(
sim_states_with_action_and_variables[index],
actions[index]);
inputs[index] = sim_states_with_action_and_variables[index];
}
//#define DEBUG_ON_CPU
#ifdef DEBUG_ON_CPU
for (int index =0; index<g_num_total_threads;index++)
{
sim_states_with_graphics_[index] = contact_sim.step_forward1(sim_states_with_action_and_variables[index]);
}
#else
#if 1
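// roll out all environments in parallel; each OpenMP thread advances one simulation instance per iteration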
#pragma omp parallel
#pragma omp for
for (int index=0;index<g_num_total_threads;index++)
{
if (!dones[index])
{
contact_sim.forward_kernel(1,&outputs[index][0], &inputs[index][0]);
}
}
#else
PARALLEL_FOR_BEGIN(g_num_total_threads)
{
if (!dones[i])
{
contact_sim.forward_kernel_fast(1,&outputs[i][0], &inputs[i][0]);
}
}PARALLEL_FOR_END();
#endif
for (int index=0;index<g_num_total_threads;index++)
{
if (!dones[index])
{
sim_states_with_graphics_[index] = outputs[index];
}
}
#endif //DEBUG_ON_CPU
for (int index=0;index<g_num_total_threads;index++)
{
if (!dones[index])
{
bool done;
Scalar reward;
contact_sim.compute_reward_done(
sim_states_[index],
sim_states_with_graphics_[index],
reward, done);
rewards[index] = reward;
dones[index] = done;
sim_states_[index] = sim_states_with_graphics_[index];
sim_states_[index].resize(contact_sim.input_dim());
observations[index] = sim_states_[index];
} else
{
rewards[index] = 0;
}
}
}
inline const std::vector<double> policy(int index, const std::vector<double>& obs)
{
std::vector<double> action (get_initial_poses<Scalar>().size(), Scalar(0));
neural_networks_[index].compute(obs, action);
return action;
}
};
#endif //ARS_VECTORIZED_ENVIRONMENT_H |
spmd2-fixed.c | /* spmd2.c
* ... illustrates the SPMD pattern in OpenMP,
* using the commandline arguments
* to control the number of threads.
*
* Joel Adams, Calvin College, November 2009.
*
* Usage: ./spmd2 [numThreads]
*
* Exercise:
* - Compile & run with no commandline args
* - Rerun with different commandline args
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
int main(int argc, char **argv)
{
// int id, numThreads;
printf("\n");
if (argc > 1)
{
omp_set_num_threads(atoi(argv[1]));
}
#pragma omp parallel
{
int id = omp_get_thread_num();
int numThreads = omp_get_num_threads();
printf("Hello from thread %d of %d\n", id, numThreads);
}
printf("\n");
return 0;
} |
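/* Sample session (a sketch -- the interleaving of lines varies between runs):
$ ./spmd2 4
Hello from thread 2 of 4
Hello from thread 0 of 4
Hello from thread 3 of 4
Hello from thread 1 of 4
*/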
parallel_transform.h | namespace Rcpp{
namespace parallel{
#if defined(RCPP11_EXPERIMENTAL_PARALLEL)
template <typename InputIterator, typename OutputIterator, typename Function>
void transform( int nthreads, InputIterator begin, InputIterator end, OutputIterator target, Function fun ){
std::vector<std::thread> workers(nthreads-1) ;
R_xlen_t chunk_size = std::distance(begin, end) / nthreads ;
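// launch nthreads-1 workers on equal chunks; the calling thread handles the final chunk (plus any remainder) itself below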
R_xlen_t start=0;
for( int i=0; i<nthreads-1; i++, start+=chunk_size){
workers[i] = std::thread( std::transform<InputIterator, OutputIterator, Function>,
begin + start, begin + start + chunk_size,
target + start,
fun) ;
}
std::transform( begin + start, end, target + start, fun ) ;
for( int i=0; i<nthreads-1; i++) workers[i].join() ;
}
#else
template <typename InputIterator, typename OutputIterator, typename Function>
inline void transform( int, InputIterator begin, InputIterator end, OutputIterator target, Function fun ){
std::transform( begin, end, target, fun ) ;
}
#endif
}
}
template <class T,
class InputIterator,
class MapFunction,
class ReductionFunction>
T MapReduce_n(InputIterator in,
unsigned int size,
T baseval,
MapFunction mapper,
ReductionFunction reducer)
{
T val = baseval;
#pragma omp parallel
{
T map_val = baseval;
#pragma omp for nowait
for (auto i = 0U; i < size; ++i)
{
map_val = reducer(map_val, mapper(*(in + i)));
}
#pragma omp critical
val = reducer(val, map_val);
}
return val;
} |
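// Illustrative use of MapReduce_n (names here are hypothetical):
//
// std::vector<double> v(1000, 2.0);
// double sum_of_squares = MapReduce_n(v.begin(), v.size(), 0.0,
// [](double x) { return x * x; },
// [](double a, double b) { return a + b; });
//
// Each thread folds its chunk into a private accumulator with the reducer,
// then the partial results are combined inside the critical section.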
sum_openmp.c | /*
Copyright (C) 2018 Francesc Alted
http://blosc.org
License: BSD 3-Clause (see LICENSE.txt)
Example program showing how to operate with compressed buffers.
To compile this program for synthetic data (default):
$ gcc -fopenmp -O3 sum_openmp.c -o sum_openmp -lblosc2
To run:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 199950000000
Sum time for uncompressed data: 0.0288 s, 26459.3 MB/s
Compression ratio: 762.9 MB -> 14.0 MB (54.6x)
Compression time: 0.288 s, 2653.5 MB/s
Sum for *compressed* data: 199950000000
Sum time for *compressed* data: 0.0188 s, 40653.7 MB/s
To use real (rainfall) data:
$ gcc -DRAINFALL -fopenmp -Ofast sum_openmp.c -o sum_openmp
And running it:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 29741012
Sum time for uncompressed data: 0.0149 s, 25627.4 MB/s
Compression ratio: 381.5 MB -> 71.3 MB (5.3x)
Compression time: 1.53 s, 249.1 MB/s
Sum for *compressed* data: 29741012
Sum time for *compressed* data: 0.0247 s, 15467.5 MB/s
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <errno.h>
#include <assert.h>
#include "blosc2.h"
#define KB 1024.
#define MB (1024*KB)
#define GB (1024*MB)
#define N (100 * 1000 * 1000)
#define CHUNKSIZE (16 * 1000)
#define NCHUNKS (N / CHUNKSIZE)
#define NTHREADS 8
#define NITER 5
#ifdef RAINFALL
#define SYNTHETIC false
#else
#define SYNTHETIC true
#endif
#if SYNTHETIC == true
#define DTYPE int64_t
#define CLEVEL 3
#define CODEC BLOSC_BLOSCLZ
#else
#define DTYPE float
#define CLEVEL 1
#define CODEC BLOSC_LZ4
#endif
int main(void) {
static DTYPE udata[N];
DTYPE chunk_buf[CHUNKSIZE];
int32_t isize = CHUNKSIZE * sizeof(DTYPE);
DTYPE sum, compressed_sum;
int64_t nbytes, cbytes;
blosc2_schunk* schunk;
int i, j, nchunk;
blosc_timestamp_t last, current;
double ttotal, itotal;
char* envvar = NULL;
printf("Blosc version info: %s (%s)\n",
BLOSC_VERSION_STRING, BLOSC_VERSION_DATE);
// Fill the buffer for a chunk
if (SYNTHETIC) {
for (j = 0; j < CHUNKSIZE; j++) {
chunk_buf[j] = j;
}
}
else {
struct stat info;
const char *filegrid = "rainfall-grid-150x150.bin";
if (stat(filegrid, &info) != 0) {
printf("Grid file %s not found!", filegrid);
exit(1);
}
char *cdata = malloc(info.st_size);
FILE *f = fopen(filegrid, "rb");
size_t blocks_read = fread(cdata, info.st_size, 1, f);
assert(blocks_read == 1);
fclose(f);
int dsize = blosc_getitem(cdata, 0, CHUNKSIZE, chunk_buf);
if (dsize < 0) {
printf("blosc_getitem() error. Error code: %d\n. Probaly reading too much data?", dsize);
exit(1);
}
free(cdata);
}
// Fill the uncompressed dataset with data chunks
for (i = 0; i < N / CHUNKSIZE; i++) {
for (j = 0; j < CHUNKSIZE; j++) {
udata[i * CHUNKSIZE + j] = chunk_buf[j];
}
}
// Reduce uncompressed dataset
ttotal = 1e10;
sum = 0;
for (int n = 0; n < NITER; n++) {
sum = 0;
blosc_set_timestamp(&last);
#pragma omp parallel for reduction (+:sum)
for (i = 0; i < N; i++) {
sum += udata[i];
}
blosc_set_timestamp(&current);
itotal = blosc_elapsed_secs(last, current);
if (itotal < ttotal) ttotal = itotal;
}
printf("Sum for uncompressed data: %10.0f\n", (double)sum);
printf("Sum time for uncompressed data: %.3g s, %.1f MB/s\n",
ttotal, (double)(isize * NCHUNKS) / (double)(ttotal * MB));
// Create a super-chunk container for the compressed container
long codec = CODEC;
envvar = getenv("SUM_COMPRESSOR");
if (envvar != NULL) {
codec = blosc_compname_to_compcode(envvar);
if (codec < 0) {
printf("Unknown compresssor: %s\n", envvar);
return 1;
}
}
blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS;
cparams.compcode = (uint8_t)codec;
long clevel = CLEVEL;
envvar = getenv("SUM_CLEVEL");
if (envvar != NULL) {
clevel = strtol(envvar, NULL, 10);
}
cparams.clevel = (uint8_t)clevel;
cparams.typesize = sizeof(DTYPE);
cparams.nthreads = 1;
blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS;
dparams.nthreads = 1;
blosc_set_timestamp(&last);
blosc2_storage storage = {.cparams=&cparams, .dparams=&dparams};
schunk = blosc2_schunk_new(&storage);
for (nchunk = 0; nchunk < NCHUNKS; nchunk++) {
for (i = 0; i < CHUNKSIZE; i++) {
chunk_buf[i] = udata[i + nchunk * CHUNKSIZE];
}
blosc2_schunk_append_buffer(schunk, chunk_buf, isize);
}
blosc_set_timestamp(&current);
ttotal = blosc_elapsed_secs(last, current);
nbytes = schunk->nbytes;
cbytes = schunk->cbytes;
printf("Compression ratio: %.1f MB -> %.1f MB (%.1fx)\n",
nbytes / MB, cbytes / MB, (1. * nbytes) / cbytes);
printf("Compression time: %.3g s, %.1f MB/s\n",
ttotal, nbytes / (ttotal * MB));
int nthreads = NTHREADS;
envvar = getenv("OMP_NUM_THREADS");
if (envvar != NULL) {
long value;
value = strtol(envvar, NULL, 10);
if (value > 0) { // strtol returns 0 on parse failure; comparing against EINVAL was incorrect
nthreads = (int)value;
}
}
// Build buffers and contexts for computations
int nchunks_thread = NCHUNKS / nthreads;
int remaining_chunks = NCHUNKS - nchunks_thread * nthreads;
blosc2_context **dctx = calloc(nthreads, sizeof(void*)); // zero-initialized so contexts can be created lazily below
DTYPE** chunk = malloc(nthreads * sizeof(void*));
for (j = 0; j < nthreads; j++) {
chunk[j] = malloc(CHUNKSIZE * sizeof(DTYPE));
}
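// Partitioning: each thread decompresses its own nchunks_thread chunks with a private
// context; the remaining_chunks leftovers are summed serially after the parallel loop.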
// Reduce compressed dataset
ttotal = 1e10;
compressed_sum = 0;
for (int n = 0; n < NITER; n++) {
compressed_sum = 0;
blosc_set_timestamp(&last);
#pragma omp parallel for private(nchunk, i) reduction (+:compressed_sum)
for (j = 0; j < nthreads; j++) {
if (dctx[j] == NULL) dctx[j] = blosc2_create_dctx(dparams); // create each context once and reuse it across NITER iterations
for (nchunk = 0; nchunk < nchunks_thread; nchunk++) {
blosc2_decompress_ctx(dctx[j], schunk->data[j * nchunks_thread + nchunk], INT32_MAX,
(void*)(chunk[j]), isize);
for (i = 0; i < CHUNKSIZE; i++) {
compressed_sum += chunk[j][i];
//compressed_sum += i + (j * nchunks_thread + nchunk) * CHUNKSIZE;
}
}
}
for (nchunk = NCHUNKS - remaining_chunks; nchunk < NCHUNKS; nchunk++) {
blosc2_decompress_ctx(dctx[0], schunk->data[nchunk], INT32_MAX, (void*)(chunk[0]), isize);
for (i = 0; i < CHUNKSIZE; i++) {
compressed_sum += chunk[0][i];
//compressed_sum += i + nchunk * CHUNKSIZE;
}
}
blosc_set_timestamp(&current);
itotal = blosc_elapsed_secs(last, current);
if (itotal < ttotal) ttotal = itotal;
}
printf("Sum for *compressed* data: %10.0f\n", (double)compressed_sum);
printf("Sum time for *compressed* data: %.3g s, %.1f MB/s\n",
ttotal, nbytes / (ttotal * MB));
//printf("sum, csum: %f, %f\n", sum, compressed_sum);
if (SYNTHETIC) {
// difficult to fulfill for single precision
assert(sum == compressed_sum);
}
/* Free resources */
for (j = 0; j < nthreads; j++) {
blosc2_free_ctx(dctx[j]);
free(chunk[j]);
}
free(dctx);
free(chunk);
blosc2_schunk_free(schunk);
return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<unsigned> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FPOptionsOverride(FpPragmaStack.CurrentValue);
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and for which we must warn if they remain unused. Only contains the
/// first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
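// Illustrative sketch (callback names are hypothetical): the parser registers
// itself once, passing its callbacks plus an opaque pointer that Sema hands
// back on each invocation.
//   S.SetLateTemplateParser(&lateTemplateParserCB, &lateTemplateCleanupCB, &P);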
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
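// Illustrative sketch (hypothetical Sema &S; the pool constructor shown,
// which chains to the current pool, is an assumption): callers bracket a
// parse with push/popWithoutEmitting and then emit or drop the pooled
// diagnostics.
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   /* parse a declaration; access/deprecation diagnostics collect in Pool */
//   S.DelayedDiagnostics.popWithoutEmitting(State);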
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
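// Illustrative sketch (hypothetical SemaRef and RD): temporarily enter a
// class's context; the destructor restores the previous state.
//   {
//     Sema::ContextRAII SavedContext(SemaRef, RD); // RD: a CXXRecordDecl*
//     /* declare or instantiate members inside RD */
//   } // prior DeclContext, 'this' override, and scope starts restored here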
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
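// Illustrative sketch (hypothetical S, Ctor, and UseLoc): synthesize a body
// under the right context and a PotentiallyEvaluated expression context.
//   SynthesizedFunctionScope Scope(S, Ctor); // Ctor: a CXXConstructorDecl*
//   Scope.addContextNote(UseLoc);            // optional context note
//   /* build and attach the function body */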
/// WeakUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma weak before being declared. Rare; such an identifier may alias
/// another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
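// Informal examples of how source constructs map to these contexts:
//   sizeof(e)            // 'e' is parsed in Unevaluated
//   case k:              // 'k' is parsed in ConstantEvaluated
//   f(x);                // 'x' is parsed in PotentiallyEvaluated
//   void g(int n = h()); // 'h()' is PotentiallyEvaluatedIfUsed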
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
unsigned getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
unsigned OldOverrides;
};
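// Illustrative sketch: scope the FP state across a compound statement so that
// any pragma-driven overrides are undone on exit.
//   {
//     FPFeaturesStateRAII SavedFP(*this);
//     /* act on a compound statement that may contain FP pragmas */
//   } // CurFPFeatures and the pragma stack value are restored here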
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, and the user-defined ~SemaDiagnosticBuilder is therefore a
// safe no-op in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
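// Illustrative sketch (the diagnostic ID is hypothetical): arguments are
// streamed into the returned builder, and the diagnostic is emitted when the
// builder goes out of scope.
//   Diag(Loc, diag::err_example_mismatch) << SomeDecl << SomeType;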
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
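// Illustrative sketch (hypothetical S): push/pop bracket work on a function
// body; the returned unique_ptr keeps the scope info alive briefly.
//   S.PushFunctionScope();
//   /* act on the function body */
//   Sema::PoppedFunctionScopePtr Info = S.PopFunctionScopeInfo();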
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and will output the number of parameter names and whether this is a
/// single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not yet have been
/// accessed. For example, in `&*p` where `p` is a noderef pointer, we first
/// parse the `*p`, but still need to check that `address of` is called on it.
/// This requires keeping a container of all pending expressions and checking
/// whether their address is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
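// Illustrative sketch (hypothetical diagnostic ID and Range): on failure the
// diagnostic has already been emitted with the extra arguments attached.
//   if (RequireCompleteType(Loc, T, diag::err_example_incomplete, Range))
//     return true; // T was incomplete; nothing more to report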
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
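// Illustrative sketch (hypothetical names): the parser dispatches on the
// classification result.
//   Sema::NameClassification C =
//       S.ClassifyName(CurScope, SS, Name, NameLoc, NextTok);
//   switch (C.getKind()) {
//   case Sema::NC_Type:    /* consume as a type: C.getType() */      break;
//   case Sema::NC_NonType: /* ActOnNameClassifiedAsNonType(...) */   break;
//   default: break;
//   }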
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
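// A minimal usage sketch (illustrative, not part of the interface; 'S' is a
// Sema instance and 'VD' a VarDecl): the NonTrivialCUnionKind values form a
// bitmask, so a caller checking a local variable that is both
// default-initialized and destroyed at scope exit might combine them:
//
//   S.checkNonTrivialCUnion(VD->getType(), VD->getLocation(),
//                           Sema::NTCUC_AutoVar,
//                           Sema::NTCUK_Init | Sema::NTCUK_Destruct);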
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note: the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
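// Illustrative note, derived from the code above: for a defaulted special
// member, getDiagnosticIndex() is simply the CXXSpecialMember value; for a
// defaulted comparison, SpecialMember is CXXInvalid, so the index continues
// past the special members at CXXInvalid + (unsigned)Comparison. The
// static_asserts in getDiagnosticIndex() guarantee the two ranges don't
// overlap.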
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike for C++, we actually parse the body and error out in case of a
/// structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the Objective-C container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a
/// particular DeclContext. Causes lookup within the scope to chain through
/// enclosing contexts in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
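// Worked example (illustrative): an attribute written directly on the
// declaration contributes AP_Explicit (0), one injected through
// '#pragma clang attribute' contributes AP_PragmaClangAttribute (1), and one
// inferred for a platform from another platform's attribute contributes
// AP_InferredFromOtherPlatform (2). Because a lower final priority wins, an
// explicitly written attribute replaces an inferred one for the same
// platform, while an inferred attribute is dropped if an explicit one is
// already present.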
/// Attribute merging methods. Return the merged attribute, or nullptr if
/// no new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
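// For instance (illustrative):
//   void f(int);  void f(double);  // second declaration -> Ovl_Overload
//   void f(int);  void f(int);     // second declaration -> Ovl_Match
//   int  f;       void f(int);     // second declaration -> Ovl_NonFunction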
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
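// Illustrative example of the distinction:
//   struct A { explicit A(int); explicit operator int(); };
// Under AllowedExplicit::None neither member participates; under
// AllowedExplicit::Conversions only 'operator int' is considered; under
// AllowedExplicit::All both are.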
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
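// A minimal usage sketch (assuming a Sema instance 'S', a case-label
// expression 'E', and the promoted switch condition type 'CondT'):
//
//   llvm::APSInt Val;
//   ExprResult Converted =
//       S.CheckConvertedConstantExpression(E, CondT, Val,
//                                          Sema::CCEK_CaseValue);
//   if (!Converted.isInvalid()) {
//     // 'Val' now holds the case value converted to 'CondT'.
//   }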
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit a 'note' diagnostic describing the given overload candidate.
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit, as a series of 'note' diagnostics, all of the template and
// non-template overload candidates identified by the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
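///
/// A minimal unqualified-lookup sketch (the surrounding names here are
/// assumptions for illustration, not part of the documented contract):
/// \code
///   LookupResult R(SemaRef, &Context.Idents.get("foo"), Loc,
///                  Sema::LookupOrdinaryName);
///   if (SemaRef.LookupName(R, CurScope) && R.isSingleResult()) {
///     NamedDecl *D = R.getFoundDecl();
///     // ... use D ...
///   }
/// \endcode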
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
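// For reference (illustrative): given the user-defined literal '12_km',
//   unsigned long long operator""_km(unsigned long long);   // LOLR_Cooked
//   unsigned long long operator""_km(const char *);         // LOLR_Raw
//   template <char...> unsigned long long operator""_km();  // LOLR_Template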
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguous and overloaded results.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of the function emission, based on the CUDA/HIP/OpenMP host/device
/// attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
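// For example (hedged): in a host-side CUDA/HIP compile, a device-only
// function would typically be classified CUDADiscarded, while the body of a
// function template that is never instantiated is TemplateDiscarded.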
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
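// A hypothetical caller sketch (names assumed, not from this header): the
// Filter callback can reject a rebuilt expression with ExprError() so that
// other combinations of typo corrections are tried:
//   ExprResult Fixed = SemaRef.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
//       [](Expr *Rebuilt) -> ExprResult {
//         // Accept only pointer-typed results (illustrative predicate).
//         return Rebuilt->getType()->isPointerType() ? Rebuilt : ExprError();
//       });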
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration matches exactly that of its interface
/// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in a
/// category match those implemented in its primary class, and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns the instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
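// Typical RAII usage (a sketch, assuming a Sema &SemaRef in scope):
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef, /*IsStmtExpr=*/false);
//     // ... act on the statements of the compound statement ...
//   } // ActOnFinishOfCompoundStmt() runs automatically here.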
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
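// Sketch of the intended error-path pattern (assumed usage): the scope is
// popped automatically unless the success path calls disable():
//   Sema::FunctionScopeRAII PopOnError(SemaRef);
//   if (BodyBuiltSuccessfully)   // hypothetical flag
//     PopOnError.disable();      // keep the scope alive on success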
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
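// These enumerators are bit flags meant to be OR'd together; e.g. a caller
// could widen the strict rules step by step (illustrative):
//   auto CESK = Sema::CopyElisionSemanticsKind(Sema::CES_AllowParameters |
//                                              Sema::CES_AllowDifferentTypes);
//   // equivalent to CES_Default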
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious null
/// statement as \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
///   if (condition);
///     do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
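// Illustrative pairing (assumed usage): the push/pop must bracket the class
// so delayed diagnostics are suspended for its duration:
//   Sema::ParsingClassState State = SemaRef.PushParsingClass();
//   // ... parse the class body ...
//   SemaRef.PopParsingClass(State);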
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
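// Sketch of a query-only call (hedged): passing BuildAndDiagnose=false asks
// whether the capture is possible without mutating any scope state:
//   QualType CaptureTy, DeclRefTy;
//   bool CannotCapture = SemaRef.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureTy, DeclRefTy,
//       /*FunctionScopeIndexToStopAt=*/nullptr);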
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
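// Illustrative mapping (hedged): for an OpenMP clause such as
//   depend(iterator(i = 0:10:2), in: a[i])
// one OMPIteratorData would record DeclIdent "i", its optional Type, the
// Range holding the 0/10/2 begin:end:step expressions, and AssignLoc,
// ColonLoc and SecColonLoc pointing at '=', the first ':' and second ':'.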
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
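// Illustrative scenario (assumption): given a smart-pointer-like class
//   struct Ptr { Widget *operator->(); };
// an access written 'p.field' can fail under '.', and these extra args let
// BuildMemberReferenceExpr reinvoke ActOnMemberAccess as 'p->field'.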
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
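// Illustrative decomposition (hedged): __builtin_offsetof(T, a.b[1]) yields
// three components: {isBrackets=false, U.IdentInfo="a"},
// {isBrackets=false, U.IdentInfo="b"}, and {isBrackets=true, U.E=expr '1'}.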
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
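// Context (assumption): these classify the MS extension checks, e.g.
//   __if_exists(N::f) { /* taken when lookup finds N::f */ }  // IER_Exists
//   __if_not_exists(N::g) { ... }                             // IER_DoesNotExist
// with IER_Dependent returned inside templates where the answer can differ
// per instantiation.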
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
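// For illustration, an initializer-list constructor in the sense of
// [dcl.init.list]p2 (a sketch assuming <initializer_list> is available):
//
//   struct Bag {
//     Bag(std::initializer_list<int> il);  // initializer-list constructor
//   };
//   Bag b{1, 2, 3};                        // selects the constructor above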
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
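// For illustration, the declarations the two callbacks above act on (a
// sketch with hypothetical names):
//
//   namespace N { int x; }
//   using N::x;                            // using-declaration
//   using IntPtr = int *;                  // alias-declaration
//   template <class T> using Ptr = T *;    // alias template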
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
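// For illustration, a case in which a defaulted special member is defined
// as deleted (a sketch):
//
//   struct NoCopy { NoCopy(const NoCopy &) = delete; };
//   struct S { NoCopy nc; };   // S's implicit copy constructor is deleted,
//                              // which ShouldDeleteSpecialMember detects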
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
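// For illustration, the C++17 fold-expressions these entry points handle
// (a sketch):
//
//   template <typename... Ts>
//   auto sum(Ts... ts) { return (ts + ... + 0); }  // binary right fold
//   template <typename... Bs>
//   bool all(Bs... bs) { return (... && bs); }     // unary left fold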
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
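// For illustration, a context in which 'this' is legal outside a member
// function body, which CXXThisTypeOverride/CXXThisScopeRAII make work
// (a sketch):
//
//   struct X {
//     int n;
//     auto get() const -> decltype(this->n);  // 'this' in a trailing
//                                             // return type
//   };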
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
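// For illustration, the three forms named in the comment above (a sketch;
// 'S' is a hypothetical class type):
//
//   int a = int(3.5);     // function-style cast
//   S   s = S(1, 2);      // class type construction
//   int z = int();        // value-initialization, z == 0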
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
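// For illustration of the scopes above (a sketch): a class-scope
// 'operator new' is found under AFS_Class or AFS_Both, while a
// '::new' expression restricts lookup to the global scope (AFS_Global).
//
//   struct Pooled {
//     static void *operator new(std::size_t);   // class-scope allocator
//   };
//   Pooled *a = new Pooled;     // may use Pooled::operator new
//   Pooled *b = ::new Pooled;   // global operator new only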
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
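// For illustration, one pseudo-function of each family (a sketch; the
// exact set of supported traits varies, and 'E' and 'x' are hypothetical):
//
//   bool   t  = __is_enum(E);                 // type trait
//   size_t e  = __array_extent(int[10], 0);   // array type trait, yields 10
//   bool   lv = __is_lvalue_expr(x);          // expression trait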
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
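// For illustration, a pseudo-destructor call on a scalar type, which the
// callbacks above resolve (a sketch):
//
//   typedef int I;
//   void destroy(I *p) { p->~I(); }  // parses, and is a no-op at run time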
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
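// For illustration, each 'identifier::' piece of a qualified name is
// described by one NestedNameSpecInfo as it is parsed (a sketch):
//
//   namespace A { namespace B { struct C { static int m; }; } }
//   int v = A::B::C::m;   // 'A::', 'B::' and 'C::' are parsed in turn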
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
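// For illustration, the C++14 init-captures the two functions above
// analyze (a sketch with hypothetical names):
//
//   auto f = [n = compute(), &r = some_ref](int d) { return n + r + d; };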
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the static invoker, and IR generation actually generates the real body
/// of the function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
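// For illustration, constraint expressions as they appear in source (a
// sketch):
//
//   template <typename T>
//   concept Addable = requires(T a, T b) { a + b; };
//   template <typename T>
//     requires Addable<T>          // trailing requires-clause also allowed
//   T sum(T a, T b) { return a + b; }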
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2 but would
/// have been, had a pair of the atomic constraints involved been declared
/// in a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
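// For illustration (a sketch): atomic constraints are identical only if
// they originate from the same source-level expression, so repeating the
// same predicate textually does not establish subsumption.
//
//   template <typename T> requires (sizeof(T) > 4) void f(T);  // #1
//   template <typename T> requires (sizeof(T) > 4) && (sizeof(T) > 1)
//   void f(T);                                                 // #2
//   // #2 does not subsume #1: the two 'sizeof(T) > 4' atoms come from
//   // different expressions. Naming the predicate in a shared concept
//   // and using it in both places would make #2 more constrained.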
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *", or a C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of the base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
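// Illustrative sketch of source patterns behind these results:
//
//   class B {
//     int n;            // private
//     friend struct F;
//   };
//   struct F { int get(B &b) { return b.n; } }; // AR_accessible (friendship)
//   int use(B &b) { return b.n; }               // AR_inaccessible
//
// AR_dependent arises when accessibility cannot be decided until template
// instantiation; AR_delayed when the check is deferred until later.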
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
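// A hypothetical caller sketch; the diagnostic ID below is illustrative and
// need not be the one an actual caller would use:
//
//   if (RequireNonAbstractType(VD->getLocation(), VD->getType(),
//                              diag::err_abstract_type_in_decl,
//                              AbstractVariableType))
//     VD->setInvalidDecl(); // the type was abstract; a diagnostic was emitted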
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
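// Illustrative example: the 'template' keyword makes a template name
// required at the marked point, so TemplateKWLoc would be valid here:
//
//   template<typename T> void f() {
//     typename T::template apply<int>::type x; // 'apply' must be a template
//   }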
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
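// Illustrative sketch (hedged; the exact rules follow C++20 P0846):
//
//   int a = undeclared<int>(0); // FoundNothing: nothing named 'undeclared'
//                               // is in scope, so it is assumed to name a
//                               // template and resolved later via ADL.
//   void g(int);
//   int b = g<int>(0);          // FoundFunctions: lookup found only
//                               // non-template functions.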
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
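// Illustrative examples of the three kinds:
//
//   template<typename T, unsigned N> void f(T (&)[N]);
//   int arr[4];
//   // f(arr):         T is CTAK_Deduced; N is CTAK_DeducedFromArrayBound
//   // f<int, 4>(arr): both arguments are CTAK_Specified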
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
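// Illustrative sketch of the conversion this routine performs:
//
//   template<long N> struct A {};
//   A<'x'> a; // the char argument is converted to the parameter type
//             // 'long'; the canonical argument is its value (120 on
//             // ASCII platforms)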
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
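// Illustrative example of a diagnosed unexpanded pack:
//
//   template<typename ...Ts> struct X {
//     Ts member; // error: 'Ts' is not expanded here (UPPC_DataMemberType)
//   };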
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
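// Illustrative example: the ellipsis turns the preceding pattern into a
// pack expansion ('g' is assumed to be found by ADL at instantiation):
//
//   template<typename ...Ts> void f(Ts ...vals) {
//     g(vals...); // expression pack expansion with pattern 'vals'
//   }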
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
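// Illustrative sketch of the length-mismatch error described above:
//
//   template<typename T, typename U> struct Pair {};
//   template<typename ...Ts> struct Y {
//     template<typename ...Us> void g(Pair<Ts, Us> ...ps);
//   };
//   // Within Y<int>, calling g with Us = {float, double} would expand
//   // 'Ts' (length 1) and 'Us' (length 2) together, which is an error.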
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
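// Illustrative sketch of the sizeof... case mentioned above:
//
//   template<typename ...Ts> struct Z {
//     static constexpr unsigned size = sizeof...(Ts);
//     // Once the argument for 'Ts' is known, sizeof...(Ts) can be
//     // computed directly, without expanding the pack element-by-element.
//   };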
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
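// Illustrative calls and the results they would produce:
//
//   template<typename T> void f(T, T);
//   template<typename T> void g(typename T::type);
//   // f(1, 2.0);  -> TDK_Inconsistent: T deduced as 'int' and 'double'
//   // f(1, 2, 3); -> TDK_TooManyArguments
//   // g(0);       -> TDK_Incomplete: no value deduced for T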
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
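// Illustrative examples:
//
//   auto a = 42;       // DAR_Succeeded: 'a' has type int
//   auto b = {1, 2.0}; // fails: no consistent element type can be deduced
//                      // for the initializer list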
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
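///
/// A minimal usage sketch (illustrative only; the index value is an example):
/// \code
///   {
///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, /*NewSubstitutionIndex=*/0);
///     // ... substitutions here use element 0 of each expanded parameter pack ...
///   } // the previous index is restored on scope exit
/// \endcode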
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
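///
/// A typical usage sketch (illustrative only):
/// \code
///   InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, Entity);
///   if (Inst.isInvalid())
///     return; // instantiation depth limit reached; an error was emitted
/// \endcode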
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
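/// A minimal usage sketch for the push/pop pair below (illustrative only;
/// \c Loc and \c D are hypothetical):
/// \code
///   CodeSynthesisContext Ctx;
///   Ctx.Kind = CodeSynthesisContext::TemplateInstantiation;
///   Ctx.PointOfInstantiation = Loc;
///   Ctx.Entity = D;
///   pushCodeSynthesisContext(Ctx);
///   // ... perform the synthesis ...
///   popCodeSynthesisContext();
/// \endcode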
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
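///
/// A minimal usage sketch (illustrative only):
/// \code
///   SFINAETrap Trap(SemaRef);
///   // ... perform substitution or deduction checks ...
///   if (Trap.hasErrorOccurred())
///     return true; // treat the trapped errors as a substitution failure
/// \endcode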
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
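///
/// A minimal usage sketch (illustrative only):
/// \code
///   {
///     TentativeAnalysisScope Tentative(SemaRef);
///     // ... provisionally analyze the construct ...
///   } // typo-correction and trap state are restored here
/// \endcode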
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
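/// RAII scope that, when enabled, swaps out the global queues of pending
/// implicit instantiations and vtable uses so that work triggered inside the
/// scope can be performed eagerly via perform(); the saved queues are
/// restored (or merged back) on destruction.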
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
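/// RAII scope that swaps out the queue of pending local implicit
/// instantiations so that member functions of local classes can be
/// instantiated via perform() within the current scope; the previous queue
/// is restored on destruction.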
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
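///
/// A usage sketch (illustrative only; \c EPI and \c NumParams are
/// hypothetical):
/// \code
///   ExtParameterInfoBuilder ParamInfos;
///   ParamInfos.set(0, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
///   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
/// \endcode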
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() is not
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
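///
/// A usage sketch (illustrative only; the extension name is an example):
/// \code
///   S.setOpenCLExtensionForType(T, "cl_khr_fp64");
/// \endcode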
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Can we exit an `omp begin/end declare variant` scope at the moment?
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
/// Build an expression referring to the OpenMP-captured copy \p Capture of
/// a variable.
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Perform an OpenMP-specific implicit conversion of expression \p Op to an
/// integer type.
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
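/// For example (illustrative user code), the loop control variable below is
/// private by default even though it is declared outside the construct:
/// \code
///   int i;
///   #pragma omp parallel for
///   for (i = 0; i < n; ++i) // 'i' is made private for the region
///     work(i);
/// \endcode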
// OpenMP directives and clauses.
/// Called on a correct id-expression from '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
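/// A minimal example (hypothetical user code) of the directive handled by
/// the two callbacks above:
/// \code
///   static int counter;
///   #pragma omp threadprivate(counter) // one copy of 'counter' per thread
/// \endcode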
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
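/// An illustrative 'declare reduction' (hypothetical user code) showing the
/// combiner and the 'omp_priv' initializer these callbacks process:
/// \code
///   #include <limits.h>
///   #pragma omp declare reduction(imax : int : omp_out = omp_in > omp_out \
///       ? omp_in : omp_out) initializer(omp_priv = INT_MIN)
/// \endcode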
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
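/// An illustrative 'declare mapper' (hypothetical user code) that maps a
/// struct together with its dynamically sized payload:
/// \code
///   struct Vec { int len; double *data; };
///   #pragma omp declare mapper(struct Vec v) map(v, v.data[0 : v.len])
/// \endcode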
/// Called on the start of a declare target region, i.e. '#pragma omp
/// declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end
/// declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
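/// The begin/end pair above brackets a declare target region such as this
/// (illustrative user code):
/// \code
///   #pragma omp declare target
///   int device_fn(int x); // also compiled for the offload device
///   #pragma omp end declare target
/// \endcode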
/// Searches for the declaration name provided to an OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on a correct id-expression from '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// Called on a well-formed executable OpenMP directive of kind \p Kind
/// after parsing of the associated statement.
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of the original function and the
/// variant ref expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
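/// An illustrative 'declare variant' (hypothetical user code) validated by
/// checkOpenMPDeclareVariantFunction and recorded by the callback above:
/// \code
///   int gpu_impl(void);
///   #pragma omp declare variant(gpu_impl) match(device = {kind(gpu)})
///   int base_impl(void); // calls may resolve to gpu_impl() on GPU devices
/// \endcode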
/// Called on a well-formed clause that takes a single expression argument;
/// dispatches to the clause-specific handlers below.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
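/// The parenthesized form with \p NumForLoops corresponds to doacross loop
/// nests (illustrative user code):
/// \code
///   #pragma omp for ordered(2)
///   for (int i = 0; i < n; ++i)
///     for (int j = 0; j < m; ++j) {
///       #pragma omp ordered depend(sink : i - 1, j)
///       step(i, j);
///       #pragma omp ordered depend(source)
///     }
/// \endcode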
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed clause that takes a single enumeration-style
/// argument; dispatches to the clause-specific handlers below.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed clause that takes both arguments and an
/// expression (such as 'schedule'); dispatches to the specific handlers.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
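/// The modifiers, kind, and chunk size correspond to clause text such as
/// this (illustrative):
/// \code
///   #pragma omp for schedule(monotonic : dynamic, 4)
/// \endcode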
/// Called on a well-formed clause that takes no arguments (such as
/// 'nowait'); dispatches to the clause-specific handlers below.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on a well-formed clause that takes a list of variables;
/// dispatches to the clause-specific handlers below.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
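/// Illustrative clause text handled here; the optional modifier, when
/// present, precedes the reduction identifier:
/// \code
///   #pragma omp parallel for reduction(+ : sum)
/// \endcode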
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
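/// Illustrative clause text: the dependence kind precedes the colon, the
/// variable list follows it:
/// \code
///   #pragma omp task depend(in : x) depend(out : y)
/// \endcode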
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
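/// Illustrative 'map' clause text with a map-type modifier, a map type, and
/// an array section (all names hypothetical):
/// \code
///   #pragma omp target map(always, tofrom : a[0 : n])
/// \endcode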
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
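// For example (illustrative C), a call through a prototype-less declaration
// applies these promotions to every argument:
//   void f();     // no prototype
//   f(1.5f, 'a'); // float promoted to double, char promoted to int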
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used by DefaultVariadicArgumentPromotion for emitting the right warning.
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands are not both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
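// For example (illustrative C):
//   int i; unsigned u; float f;
//   i + u; // both operands converted to 'unsigned int'
//   i + f; // 'i' converted to 'float' (C99 6.3.1.8)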
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers of different sign but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
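/// Illustrative C snippets (hypothetical) for a few of the categories
/// above; each is a constraint violation that is typically accepted as an
/// extension with a warning:
/// \code
///   int *p = 42;      // IntToPointer
///   long n = p;       // PointerToInt
///   void *v = main;   // FunctionVoidPointer
///   unsigned *u = p;  // IncompatiblePointerSign
/// \endcode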
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking for declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking for C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
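/// For example (illustrative C++), with 'struct B {}; struct D : B {};',
/// comparing T1 = const B against T2 = D yields Ref_Compatible (B is a base
/// of D), while 'int' and 'double' are not reference-related, so the second
/// binding below goes through a materialized temporary:
/// \code
///   struct B {}; struct D : B {};
///   D d;
///   const B &rb = d;     // direct reference binding
///   const int &ri = 1.0; // binds to a temporary instead
/// \endcode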
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
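// A minimal sketch of the kind of cast this accepts, assuming a typical
// 64-bit target (illustrative, not part of this header):
//
//   typedef int v2si __attribute__((vector_size(8)));
//   long long bits = 0x0000000100000002LL;
//   v2si v = (v2si) bits;   // vector <-> same-size integer cast is allowed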
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
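// Illustrative sketch of how a caller typically consumes a ConditionResult
// (the surrounding names are assumptions, not code from this header):
//
//   ConditionResult Cond = ActOnCondition(S, Loc, CondExpr,
//                                         ConditionKind::ConstexprIf);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     /* 'if constexpr': *Known selects the branch to instantiate */;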
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the checked condition expression, or ExprError() if there were
/// any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
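// For example (illustrative): 'if ((x == 4))' is flagged, since the extra
// parentheses suggest the user may have meant the assignment 'if ((x = 4))'.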
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid ExprResult on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid ExprResult on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check if the declaration is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
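// Because the enumerators are ordered from worst to best, callers can
// compare preferences directly. An illustrative sketch:
//
//   if (IdentifyCUDAPreference(Caller, F1) >
//       IdentifyCUDAPreference(Caller, F2))
//     /* overload resolution prefers F1 over F2 */;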
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// By default, a CUDA lambda is a host device function unless it has an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds the function in \p Matches with the highest calling priority
/// from the \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p VD satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p VD.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. The returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the number of arguments being passed to
/// a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
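// Worked example: during code completion just after a comma, a call that
// currently has 2 arguments to a function with 2 parameters is treated as
// having an extra argument, so TooManyArguments(2, 2, true) returns true,
// while TooManyArguments(2, 2, false) returns false.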
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for the device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
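// Illustrative usage sketch (the surrounding names are assumptions): evaluate
// subexpressions in an unevaluated context for the duration of a scope:
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... build expressions that must not be odr-used or codegen'ed ...
//   } // the context is popped here by the destructor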
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_binop__isge_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16)
// A*D function (colscale): GB (_AxD__isge_uint16)
// D*A function (rowscale): GB (_DxB__isge_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16)
// C=scalar+B GB (_bind1st__isge_uint16)
// C=scalar+B' GB (_bind1st_tran__isge_uint16)
// C=A+scalar GB (_bind2nd__isge_uint16)
// C=A'+scalar GB (_bind2nd_tran__isge_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij >= bij)
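// Note: ISGE is the "is greater than or equal" operator computed in the
// type's own domain, so for uint16_t the result is the value 0 or 1 stored
// as a uint16_t, not a GrB_BOOL. For example, aij = 7, bij = 3 gives cij = 1.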
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isge_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
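// Illustrative sketch, not part of the generated kernel: a dense-only
// version of the bind2nd loop above, showing that the ISGE operator simply
// stores the 0/1 result of (aij >= y) as a uint16_t. The helper name is
// hypothetical; the #if 0 guard keeps it out of the build.
#if 0
#include <stdint.h>
static void demo_bind2nd_isge_uint16
(
    uint16_t *Cx,           // output array, size anz
    const uint16_t *Ax,     // input array, size anz (no bitmap: all present)
    uint16_t y,             // the bound scalar
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // e.g. Ax = {1,5,9}, y = 5 gives Cx = {0,1,1}
        Cx [p] = (Ax [p] >= y) ;
    }
}
#endif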
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
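// restore GB_ATYPE to the type of A for any template included after this
// function (here it is unchanged, since x and A are both uint16_t)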
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods take a 24 bit image and reduce
% the number of colors so it can be displayed on a raster device with
% fewer bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinates of two opposite vertices (the
% vertex nearest the origin in RGB space and the vertex farthest from the
% origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image. Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree. Our goal
% is to minimize the numerical discrepancies between the original colors
% and quantized colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node
% to allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of pixels whose
% colors fall within the cubic volume which the node represents. This
% includes n1 - n2 pixels whose colors should be defined by nodes at a
% lower level in the tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
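/*
  Illustrative sketch, not part of MagickCore: a flat-array version of the
  pruning threshold loop described above. The types and names are
  hypothetical, and the #if 0 guard keeps it out of the build; the real
  reduction walks the tree and merges a pruned node's statistics into its
  parent.
*/
#if 0
#include <stddef.h>

typedef struct { double error; int n2; int pruned; } DemoNode;

static void demo_reduce(DemoNode *nodes,size_t count,size_t max_colors)
{
  double
    ep = 0.0;

  for ( ; ; )
  {
    double
      next_ep = -1.0;

    size_t
      survivors = 0;

    for (size_t i=0; i < count; i++)
      if ((nodes[i].pruned == 0) && (nodes[i].n2 > 0))
        survivors++;
    if (survivors <= max_colors)
      break;
    for (size_t i=0; i < count; i++)
    {
      if (nodes[i].pruned != 0)
        continue;
      if (nodes[i].error <= ep)
        nodes[i].pruned=1;  /* statistics would merge into the parent */
      else
        if ((next_ep < 0.0) || (nodes[i].error < next_ep))
          next_ep=nodes[i].error;
    }
    if (next_ep < 0.0)
      break;  /* everything prunable has been pruned */
    ep=next_ep;  /* raise Ep to the minimum E among remaining nodes */
  }
}
#endif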
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
  Typedef declarations.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
MagickRealType
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
MagickRealType
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
MagickRealType
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
if (quantize_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither=image_info->dither;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
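/*
  Illustrative usage sketch, not part of this file: reduce an image to at
  most 256 colors with the public API above and QuantizeImage() (declared
  in magick/quantize.h). Error handling is abbreviated, and the #if 0
  guard keeps the sketch out of the build.
*/
#if 0
static void DemoQuantize(Image *image)
{
  QuantizeInfo
    *quantize_info;

  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=FloydSteinbergDitherMethod;
  (void) QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
}
#endif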
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
MagickRealType
alpha;
alpha_pixel->index=0;
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->opacity == OpaqueOpacity))
{
alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
return;
}
alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
alpha_pixel->red=alpha*GetPixelRed(pixel);
alpha_pixel->green=alpha*GetPixelGreen(pixel);
alpha_pixel->blue=alpha*GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
0x1) << 3;
return(id);
}
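/*
  Example: with index == 7, ColorToNodeId() keeps only the most significant
  bit of each 8-bit channel, so a pixel above mid-range in green and blue
  but not red yields id = 0 + (1 << 1) + (1 << 2) = 6; when alpha is
  associated, the opacity bit selects among children 8..15.
*/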
static inline MagickBooleanType IsSameColor(const Image *image,
const PixelPacket *p,const PixelPacket *q)
{
if ((GetPixelRed(p) != GetPixelRed(q)) ||
(GetPixelGreen(p) != GetPixelGreen(q)) ||
(GetPixelBlue(p) != GetPixelBlue(q)))
return(MagickFalse);
if ((image->matte != MagickFalse) &&
(GetPixelOpacity(p) != GetPixelOpacity(q)))
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=0.0;
if ((image->colors > 1) &&
(GetPixelLuma(image,image->colormap+0) >
GetPixelLuma(image,image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->matte;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
MagickRealType
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
midpoint.red=(MagickRealType) QuantumRange/2.0;
midpoint.green=(MagickRealType) QuantumRange/2.0;
midpoint.blue=(MagickRealType) QuantumRange/2.0;
midpoint.opacity=(MagickRealType) QuantumRange/2.0;
midpoint.index=(MagickRealType) QuantumRange/2.0;
error.opacity=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance) != MagickFalse)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
for (y++; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
        if (IsNaN(distance) != MagickFalse)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize_info is NULL, a new one.
%
% o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
if (clone_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither=quantize_info->dither;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
MagickRealType
pixel;
register DoublePixelPacket
*magick_restrict q;
register MagickRealType
alpha,
beta,
distance;
register PixelPacket
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
}
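      /*
        Accumulate the squared distance one channel at a time so a candidate
        can be rejected as soon as the partial sum exceeds the best distance
        found so far.
      */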
pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image,&image->exception) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero. DefineImageColormap() returns the
% number of colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
(void) DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
register MagickRealType
alpha;
register PixelPacket
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
SetPixelOpacity(q,OpaqueOpacity);
}
else
{
MagickRealType
opacity;
opacity=(MagickRealType) (alpha*QuantumRange*
node_info->total_color.opacity);
SetPixelOpacity(q,ClampToQuantum(opacity));
if (q->opacity == OpaqueOpacity)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
}
else
{
double
gamma;
gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.blue)));
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
register Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
register ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
2*sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
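  /*
    Pack the top (8-CacheShift) bits of each channel into one integer key:
    red in the low bits, then green, blue, and (when alpha is associated)
    opacity. With CacheShift 2 this yields a 24-bit key, matching the
    1 << (4*(8-CacheShift)) cache entries allocated in GetCubeInfo().
  */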
ssize_t
offset;
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
const char
*artifact;
double
amount;
DoublePixelPacket
**pixels;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelThreadSet(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
exception=(&image->exception);
status=MagickTrue;
amount=1.0;
artifact=GetImageArtifact(image,"dither:diffusion-amount");
if (artifact != (const char *) NULL)
amount=StringToDoubleInterval(artifact,1.0);
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
size_t
index;
ssize_t
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
current=pixels[id]+(y & 0x01)*image->columns;
previous=pixels[id]+((y+1) & 0x01)*image->columns;
v=(ssize_t) ((y & 0x01) ? -1 : 1);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
register ssize_t
i;
ssize_t
u;
u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(&cube,q+u,&pixel);
if (x > 0)
{
pixel.red+=7.0*amount*current[u-v].red/16;
pixel.green+=7.0*amount*current[u-v].green/16;
pixel.blue+=7.0*amount*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=7.0*amount*current[u-v].opacity/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=previous[u+v].red/16;
pixel.green+=previous[u+v].green/16;
pixel.blue+=previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=previous[u+v].opacity/16;
}
pixel.red+=5.0*amount*previous[u].red/16;
pixel.green+=5.0*amount*previous[u].green/16;
pixel.blue+=5.0*amount*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=5.0*amount*previous[u].opacity/16;
if (x > 0)
{
pixel.red+=3.0*amount*previous[u-v].red/16;
pixel.green+=3.0*amount*previous[u-v].green/16;
pixel.blue+=3.0*amount*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=3.0*amount*previous[u-v].opacity/16;
}
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+u,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q+u,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q+u,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixel(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].opacity=pixel.opacity-color.opacity;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelThreadSet(pixels);
return(MagickTrue);
}
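/*
  Illustrative sketch, not part of MagickCore: serpentine Floyd-Steinberg
  error diffusion on a grayscale buffer of doubles in [0,1], quantized to
  two levels. Each pixel's error is split 7/16 ahead on the current row
  and 3/16, 5/16, 1/16 across the next row, mirroring the weights used
  above. The helper name is hypothetical; the #if 0 guard keeps it out of
  the build.
*/
#if 0
static void DemoFloydSteinberg(double *img,int width,int height)
{
  for (int y=0; y < height; y++)
  {
    int
      step = (y & 0x01) ? -1 : 1;  /* serpentine: alternate scan direction */

    for (int i=0; i < width; i++)
    {
      int
        x = (y & 0x01) ? width-1-i : i;

      double
        value = img[y*width+x],
        quantized = value < 0.5 ? 0.0 : 1.0,
        error = value-quantized;

      img[y*width+x]=quantized;
      if ((x+step >= 0) && (x+step < width))
        img[y*width+x+step]+=7.0*error/16.0;          /* ahead, same row */
      if (y+1 < height)
      {
        if ((x-step >= 0) && (x-step < width))
          img[(y+1)*width+x-step]+=3.0*error/16.0;    /* behind, next row */
        img[(y+1)*width+x]+=5.0*error/16.0;           /* directly below */
        if ((x+step >= 0) && (x+step < width))
          img[(y+1)*width+x+step]+=1.0*error/16.0;    /* ahead, next row */
      }
    }
  }
}
#endif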
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
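/*
  Riemersma() emits a Hilbert curve: each level stitches together four
  rotated copies of the level below, joined by single-pixel moves in the
  given compass direction, and RiemersmaDither() quantizes one pixel per
  move while aging the weighted error queue.
*/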
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
const size_t level,const unsigned int direction)
{
if (level == 1)
switch (direction)
{
case WestGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
break;
}
case EastGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
break;
}
case NorthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
break;
}
case SouthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
break;
}
case EastGravity:
{
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
break;
}
case NorthGravity:
{
Riemersma(image,image_view,cube_info,level-1,WestGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity);
Riemersma(image,image_view,cube_info,level-1,NorthGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,EastGravity);
break;
}
case SouthGravity:
{
Riemersma(image,image_view,cube_info,level-1,EastGravity);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity);
Riemersma(image,image_view,cube_info,level-1,SouthGravity);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity);
Riemersma(image,image_view,cube_info,level-1,WestGravity);
break;
}
default:
break;
}
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
register CubeInfo
*p;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
ExceptionInfo
*exception;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
i;
/*
Distribute error.
*/
exception=(&image->exception);
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
AssociateAlphaPixel(cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=p->weights[i]*p->error[i].red;
pixel.green+=p->weights[i]*p->error[i].green;
pixel.blue+=p->weights[i]*p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity+=p->weights[i]*p->error[i].opacity;
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
      index=(size_t) p->cache[i];
if (image->storage_class == PseudoClass)
*indexes=(IndexPacket) index;
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube_info->associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixel(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
depth;
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) memset(cube_info->error,0,ErrorQueueLength*
sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
for (depth=1; i != 0; depth++)
i>>=1;
if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
depth++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,&image->exception);
if (depth > 1)
Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
image_view=DestroyCacheView(image_view);
return(status);
}
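/*
  Editorial note (worked example, not library code): the depth loop in
  DitherImage() above computes one more than the bit width of the larger
  image dimension.  For a 640x480 image, i=640 needs 10 right-shifts to
  reach zero, so depth ends at 11; since (1L << 11) >= 640 no correction is
  applied, and Riemersma() is entered at level depth-1 = 10, i.e. a Hilbert
  curve of order 10 (2^10 = 1024 covers 640 pixels).
*/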
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
MagickRealType
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither == MagickFalse)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*
length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
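/*
  Editorial sketch (not library code): the dither-weight setup above in
  plain C, with queue_length and quantum_range standing in for
  ErrorQueueLength and QuantumRange; assumes <math.h> and queue_length > 1.
  The weights decay exponentially from the newest queued error back to the
  oldest and are normalized so they sum to exactly 1.0.
*/
static void SketchDitherWeights(double *weights,const int queue_length,
  const double quantum_range)
{
  double
    sum,
    total,
    weight;

  int
    i;

  weight=1.0;
  for (i=0; i < queue_length; i++)
  {
    weights[queue_length-i-1]=1.0/weight;  /* PerceptibleReciprocal() above
      additionally guards against near-zero denominators */
    weight*=exp(log(quantum_range+1.0)/(queue_length-1.0));
  }
  total=0.0;
  for (i=0; i < queue_length; i++)
    total+=weights[i];
  sum=0.0;
  for (i=0; i < queue_length; i++)
  {
    weights[i]/=total;
    sum+=weights[i];
  }
  weights[0]+=1.0-sum;  /* absorb floating-point residue */
}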
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a new node (drawn
% from a pooled queue of nodes).
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level within the color cube tree at which the
% node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) memset(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
*indexes;
MagickRealType
alpha,
area,
beta,
distance,
gamma,
maximum_error,
mean_error,
mean_error_per_pixel;
size_t
index;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
(void) memset(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=1UL*GetPixelIndex(indexes+x);
if (image->matte != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
beta=(MagickRealType) (QuantumScale*(QuantumRange-
image->colormap[index].opacity));
}
distance=fabs((double) (alpha*GetPixelRed(p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p++;
}
}
image_view=DestroyCacheView(image_view);
gamma=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(MagickTrue);
}
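/*
  Editorial sketch (not library code): reading the three metrics documented
  above after calling GetImageQuantizeError() on a PseudoClass image.
*/
static void SketchReportQuantizeError(Image *image)
{
  if (GetImageQuantizeError(image) == MagickFalse)
    return;
  (void) fprintf(stdout,"mean error/pixel: %g, normalized mean: %g, "
    "normalized maximum: %g\n",image->error.mean_error_per_pixel,
    image->error.normalized_mean_error,
    image->error.normalized_maximum_error);
}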
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
(void) memset(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither=MagickTrue;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
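/*
  Editorial sketch (not library code): typical use of GetQuantizeInfo() --
  initialize a stack instance, override a few of the defaults set above,
  then quantize; assumes the public prototypes from magick/quantize.h are
  in scope.
*/
static MagickBooleanType SketchQuantizeTo64Colors(Image *image)
{
  QuantizeInfo
    quantize_info;

  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=64;  /* default is 256 */
  quantize_info.dither_method=FloydSteinbergDitherMethod;
  return(QuantizeImage(&quantize_info,image));
}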
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const MagickBooleanType dither)
{
MagickBooleanType
status;
status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
return(status);
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((channel & RedChannel) != 0)
image->colormap[i].red=PosterizePixel(image->colormap[i].red);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=PosterizePixel(image->colormap[i].green);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,PosterizeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither=dither;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
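/*
  Editorial sketch (not library code): what PosterizePixel() above computes.
  For levels=4 on a 16-bit quantum (QuantumRange 65535) every channel value
  snaps to the nearest of 0, 21845, 43690, 65535.  A plain-double
  equivalent, reusing MagickRound() from above (assumes levels >= 1):
*/
static double SketchPosterizeValue(const double quantum,const size_t levels,
  const double quantum_range)
{
  double
    denominator;

  /* scale to [0,levels-1], round to the nearest grid point, rescale;
     the guard mirrors the MagickMax() in the PosterizePixel() macro */
  denominator=levels > 1 ? (double) (levels-1) : 1.0;
  return(quantum_range*MagickRound(quantum/quantum_range*(levels-1))/
    denominator);
}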
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneSubtree method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.opacity+=node_info->total_color.opacity;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
if (image->matte == MagickFalse)
{
if (SetImageGray(image,&image->exception) != MagickFalse)
(void) SetGrayscaleImage(image);
}
if ((image->storage_class == PseudoClass) &&
(image->colors <= maximum_colors))
{
if ((quantize_info->colorspace != UndefinedColorspace) &&
(quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace(image,quantize_info->colorspace);
return(MagickTrue);
}
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if ((quantize_info->dither != MagickFalse) && (depth > 2))
depth--;
if ((image->matte != MagickFalse) && (depth > 5))
depth--;
if (SetImageGray(image,&image->exception) != MagickFalse)
depth=MaxTreeDepth;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image if it contains more than the
maximum; otherwise we can disable dithering to improve performance.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
else
cube_info->quantize_info->dither_method=NoDitherMethod;
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
register ssize_t
i;
size_t
depth,
maximum_colors,
number_images;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither != MagickFalse)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(&images->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a 1D array, which the caller then sorts. This accelerates the
% color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to the node in the color cube tree currently being
% traversed.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,
MagickRealType *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixels' colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree, which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
MagickRealType
*p,
*q;
p=(MagickRealType *) error_p;
q=(MagickRealType *) error_q;
if (*p > *q)
return(1);
if (fabs((double) (*q-*p)) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
MagickRealType
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (MagickRealType *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
MagickRealTypeCompare);
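/*
  Editorial note: quantize_error is now sorted ascending; choosing the
  entry that leaves roughly 110% of the color budget above it lets the
  first Reduce() pass below prune most of the tree in one sweep.
*/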
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(MagickRealType *) RelinquishMagickMemory(
quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
Classify image colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
Classify image colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
PixelPacket
*color_1,
*color_2;
int
intensity;
color_1=(PixelPacket *) x;
color_2=(PixelPacket *) y;
intensity=PixelPacketIntensity(color_1)-(int) PixelPacketIntensity(color_2);
return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
PixelPacket
*colormap;
register ssize_t
i;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
exception=(&image->exception);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace);
if (image->storage_class == PseudoClass)
colormap_index=(ssize_t *) AcquireQuantumMemory(image->colors+1,
sizeof(*colormap_index));
else
colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize+1,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),MaxColormapSize*
sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=GetPixelRed(q);
image->colormap[image->colors].green=GetPixelGreen(q);
image->colormap[image->colors].blue=GetPixelBlue(q);
image->colors++;
}
}
SetPixelIndex(indexes+x,colormap_index[intensity]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(unsigned short) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
IntensityCompare);
colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*colormap));
if (colormap == (PixelPacket *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].opacity]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
indexes+x))]);
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
Stencil_par3.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "malloc2D.h"
#include "timer.h"
int main(int argc, char *argv[])
{
struct timespec tstart_cpu, tstop_cpu;
double cpu_time = 0.0; /* accumulates burst timings; must start at zero */
int imax=2002, jmax = 2002;
int niter=1000, nburst=100;
double** restrict x = malloc2D(jmax, imax);
double** restrict xnew = malloc2D(jmax, imax);
#pragma omp target enter data map(to:x[0:jmax][0:imax], xnew[0:jmax][0:imax])
#pragma omp target teams
{
#pragma omp distribute parallel for simd collapse(2)
for (int j = 0; j < jmax; j++){
for (int i = 0; i < imax; i++){
xnew[j][i] = 0.0;
x[j][i] = 5.0;
}
}
#pragma omp distribute parallel for simd collapse(2)
for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
for (int i = imax/2 - 5; i < imax/2 -1; i++){
x[j][i] = 400.0;
}
}
}
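/* Editorial note: sweeps are grouped into bursts of nburst so the progress
   message below prints only once every nburst iterations. */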
for (int iter = 0; iter < niter; iter+=nburst){
for (int ib = 0; ib < nburst; ib++){
cpu_timer_start(&tstart_cpu);
#pragma omp target teams distribute parallel for simd collapse(2)
for (int j = 1; j < jmax-1; j++){
for (int i = 1; i < imax-1; i++){
xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
}
}
#pragma omp target teams distribute parallel for simd collapse(2)
for (int j = 0; j < jmax; j++){
for (int i = 0; i < imax; i++){
x[j][i] = xnew[j][i];
}
}
cpu_time += cpu_timer_stop(tstart_cpu);
}
printf("Iter %d\n",iter+nburst);
}
#pragma omp target exit data map(from:x[0:jmax][0:imax], xnew[0:jmax][0:imax])
free(x);
free(xnew);
printf("Timing is %lf\n",cpu_time);
}
|
mandel-omp-for-row.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
//#pragma omp for schedule(runtime)
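/* row and col are file-scope globals (declared above), so they must be
 * listed private() here; otherwise all threads would share the same loop
 * counters and race. */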
#pragma omp parallel for schedule(runtime) private(row,col)
for (row = 0; row < height; ++row) {
for (col = 0; col < width; ++col) {
{
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
#pragma omp critical
{
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
}
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
plex.c | /*
* compute the duplex structure of two RNA strands,
* allowing only inter-strand base pairs.
* see cofold() for computing hybrid structures without
* restriction.
* Ivo Hofacker
* Vienna RNA package
*
*/
/*
* library containing the functions used in rnaplex
* the program rnaplex uses the following function:
* Lduplexfold: finds high-scoring segments;
* it stores the end-positions of these segments in an array
* and then calls, for each of these positions, the duplexfold function,
* which performs the backtracking for each of the high-scoring positions.
* It allows one to find suboptimal, partially overlapping (depending on a parameter)
* duplexes between a long RNA and a shorter one.
* Contrary to RNAduplex, the energy model is not E~log(N),
* where N is the length of an interior loop, but an affine model
* whose extension and opening parameters are fitted to the energy
* parameters used by RNAduplex. This allows one to check for duplexes between a short RNA (20 nt)
* and a long one at a speed of 1 Mnt/s. At this speed a whole genome (3 Gnt) can be analyzed for one siRNA
* in about 50 minutes.
* The algorithm is based on an idea by Durbin and Eddy: when the alignment reaches a value larger than a
* given threshold, this value is stored in an array. When the alignment score then drops
* below this threshold, the alignment restarts from this value; in that way the backtracking allows us
* to find all non-overlapping high-scoring segments.
* For more information see Durbin et al., "Biological Sequence Analysis".
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/pair_mat.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/plex.h"
#include "ViennaRNA/ali_plex.h"
#include "ViennaRNA/loops/all.h"
/* #################SIMD############### */
/* int subopt_sorted=0; */
#define PUBLIC
#define PRIVATE static
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymmetry penalty */
#define ARRAY 32 /*array size*/
#define UNIT 100
#define MINPSCORE -2 * UNIT
/**
*** Macros that define indices for the single-array approach used in FLduplexfold_XS (roughly a 20% gain in runtime),
*** so that everything is done in a 1D array.
*** Inputs are i, j and the length l of the query RNA.
*** The 1D array is divided into 6 subarrays, one for each allowed state.
*** The length of each subarray is 5*l: 5 is the maximal stored distance on the target sequence,
*** l is the length of the query sequence.
#define LCI(i, j, l) ((i) * l + j)
#define LINI(i, j, l) ((i + 5) * l + j)
#define LBXI(i, j, l) ((i + 10) * l + j)
#define LBYI(i, j, l) ((i + 15) * l + j)
#define LINIX(i, j, l) ((i + 20) * l + j)
#define LINIY(i, j, l) ((i + 25) * l + j)
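/**
 *** Editorial note: with query length l the six states live back to back in
 *** one 1D array of length 30*l: c at offset 0 (LCI), in at 5*l (LINI),
 *** bx at 10*l (LBXI), by at 15*l (LBYI), inx at 20*l (LINIX) and
 *** iny at 25*l (LINIY); i (0..4) is the stored distance on the target,
 *** j the position on the query.
**/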
PRIVATE void
encode_seqs(const char *s1,
const char *s2);
PRIVATE short *
encode_seq(const char *seq);
PRIVATE void
update_dfold_params(void);
/**
*** duplexfold(_XS)/backtrack(_XS) compute the duplex interaction with the standard energy model and consider extension_cost
*** find_max(_XS)/plot_max(_XS) find suboptimals and the MFE
*** fduplexfold(_XS) computes the duplex in the plex way
**/
PRIVATE duplexT
duplexfold(const char *s1,
const char *s2,
const int extension_cost);
PRIVATE char *
backtrack(int i,
int j,
const int extension_cost);
PRIVATE void
find_max(const int *position,
const int *position_j,
const int delta,
const int threshold,
const int length,
const char *s1,
const char *s2,
const int extension_cost,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b);
PRIVATE void
plot_max(const int max,
const int max_pos,
const int max_pos_j,
const int alignment_length,
const char *s1,
const char *s2,
const int extension_cost,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b);
/* PRIVATE duplexT duplexfold_XS(const char *s1, const char *s2,const int **access_s1, const int **access_s2, const int i_pos, const int j_pos, const int threshold); */
PRIVATE duplexT
duplexfold_XS(const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int i_pos,
const int j_pos,
const int threshold,
const int i_flag,
const int j_flag);
/* PRIVATE char * backtrack_XS(int i, int j, const int** access_s1, const int** access_s2); */
PRIVATE char *
backtrack_XS(int i,
int j,
const int **access_s1,
const int **access_s2,
const int i_flag,
const int j_flag);
PRIVATE void
find_max_XS(const int *position,
const int *position_j,
const int delta,
const int threshold,
const int alignment_length,
const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b);
PRIVATE void
plot_max_XS(const int max,
const int max_pos,
const int max_pos_j,
const int alignment_length,
const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b);
PRIVATE duplexT
fduplexfold(const char *s1,
const char *s2,
const int extension_cost,
const int il_a,
const int il_b,
const int b_a,
const int b_b);
PRIVATE char *
fbacktrack(int i,
int j,
const int extension_cost,
const int il_a,
const int il_b,
const int b_a,
const int b_b,
int *dG);
PRIVATE duplexT
fduplexfold_XS(const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int i_pos,
const int j_pos,
const int threshold,
const int il_a,
const int il_b,
const int b_a,
const int b_b);
PRIVATE char *
fbacktrack_XS(int i,
int j,
const int **access_s1,
const int **access_s2,
const int i_pos,
const int j_pos,
const int il_a,
const int il_b,
const int b_a,
const int b_b,
int *dGe,
int *dGeplex,
int *dGx,
int *dGy);
/*@unused@*/
#define MAXSECTORS 500 /* dimension for a backtrack array */
#define LOCALITY 0. /* locality parameter for base-pairs */
PRIVATE vrna_param_t *P = NULL;
/**
*** energy array used in fduplexfold and fduplexfold_XS
*** We do not use the 1D array here as it is not time critical
*** It also makes the code more readable
*** c -> stack;in -> interior loop;bx/by->bulge;inx/iny->1xn loops
**/
PRIVATE int **c = NULL, **in = NULL, **bx = NULL, **by = NULL, **inx = NULL, **iny = NULL;
/**
*** S1, SS1, ... contain the encoded target and query sequences
*** n1, n2, n3, n4 contain the target and query lengths
**/
PRIVATE short *S1 = NULL, *SS1 = NULL, *S2 = NULL, *SS2 = NULL; /*contains the sequences*/
PRIVATE int n1, n2; /* sequence lengths */
PRIVATE int n3, n4; /* sequence lengths for the duplex */
/*-----------------------------------------------------------------------duplexfold_XS---------------------------------------------------------------------------*/
/**
*** duplexfold_XS is the counterpart of the duplex function defined in duplex.c,
*** but takes accessibility into account. It is similar to the MFE version of RNAup.
*** The only approximation made is that the base pair between the target 3' end and the query 5' end is known.
*** s1,s2 are the query and target sequence; access_s1, access_s2 are the accessibility
*** profiles, i_pos, j_pos are the coordinates of the closing pair.
**/
PRIVATE duplexT
duplexfold_XS(const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int i_pos,
const int j_pos,
const int threshold,
const int i_flag,
const int j_flag)
{
int i, j, p, q, Emin = INF, l_min = 0, k_min = 0;
char *struc;
vrna_md_t md;
struc = NULL;
duplexT mfe;
n3 = (int)strlen(s1);
n4 = (int)strlen(s2);
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
update_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
for (i = 0; i <= n3; i++)
c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
for (i = 0; i <= n3; i++)
for (j = 0; j <= n4; j++)
c[i][j] = INF;
encode_seqs(s1, s2);
int type, type2, type3, E, k, l;
i = n3 - i_flag;
j = 1 + j_flag;
type = pair[S1[i]][S2[j]];
if (!type) {
printf("Error during initialization of the duplex in duplexfold_XS\n");
mfe.structure = NULL;
mfe.energy = INF;
return mfe;
}
c[i][j] = P->DuplexInit;
/** if (type>2) c[i][j] += P->TerminalAU;
*** c[i][j]+=P->dangle3[rtype[type]][SS1[i+1]];
*** c[i][j]+=P->dangle5[rtype[type]][SS2[j-1]];
*** The three above lines are replaced by the line below
**/
c[i][j] += E_ExtLoop(rtype[type], (j_flag ? SS2[j - 1] : -1), (i_flag ? SS1[i + 1] : -1), P);
/* if(j_flag ==0 && i_flag==0){ */
/* c[i][j] += E_ExtLoop(rtype[type], -1 , -1 , P); */
/* }else if(j_flag ==0 && i_flag==1){ */
/* c[i][j] += E_ExtLoop(rtype[type], -1 , SS1[i+1], P); */
/* }else if(j_flag ==1 && i_flag==0){ */
/* c[i][j] += E_ExtLoop(rtype[type], SS2[j-1] , -1, P); */
/* }else { */
/* c[i][j] += E_ExtLoop(rtype[type], SS2[j-1] , SS1[i+1], P); */
/* } */
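/*
* Editorial sketch (an assumption for illustration, not part of the original
* code path): for an inner closing pair with both neighboring nucleotides
* present, E_ExtLoop bundles the two dangle terms and the terminal-AU penalty
* that the commented-out lines above add up by hand:
*/
#if 0
int e_manual = (type > 2 ? P->TerminalAU : 0)
+ P->dangle3[rtype[type]][SS1[i + 1]]
+ P->dangle5[rtype[type]][SS2[j - 1]];
int e_bundled = E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1], P);
/* e_manual and e_bundled should agree when both dangle positions exist;
* passing -1 to E_ExtLoop drops the corresponding dangle contribution. */
#endif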
/* Just in case we have only one bp, we initialize ... */
/* k_min, l_min and Emin */
k_min = i;
l_min = j;
Emin = c[i][j];
for (k = i; k > 1; k--) {
if (k < i)
c[k + 1][0] = INF;
for (l = j; l <= n4 - 1; l++) {
if (!(k == i && l == j))
c[k][l] = INF;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
for (p = k + 1; p <= n3 - i_flag && p < k + MAXLOOP - 1; p++) {
for (q = l - 1; q >= 1 + j_flag; q--) {
if (p - k + l - q - 2 > MAXLOOP)
break;
type3 = pair[S1[p]][S2[q]];
if (!type3)
continue;
E = E_IntLoop(p - k - 1,
l - q - 1,
type2,
rtype[type3],
SS1[k + 1],
SS2[l - 1],
SS1[p - 1],
SS2[q + 1],
P);
c[k][l] = MIN2(c[k][l], c[p][q] + E);
}
}
E = c[k][l];
E += access_s1[i - k + 1][i_pos] + access_s2[l - 1][j_pos + (l - 1) - 1];
/**if (type2>2) E += P->TerminalAU;
***if (k>1) E += P->dangle5[type2][SS1[k-1]];
***if (l<n4) E += P->dangle3[type2][SS2[l+1]];
*** Replaced by the line below
**/
E += E_ExtLoop(type2, (k > 1) ? SS1[k - 1] : -1, (l < n4) ? SS2[l + 1] : -1, P);
if (E < Emin) {
Emin = E;
k_min = k;
l_min = l;
}
}
}
if (Emin > threshold) {
mfe.energy = INF;
mfe.ddG = INF;
mfe.structure = NULL;
for (i = 0; i <= n3; i++)
free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
return mfe;
} else {
struc = backtrack_XS(k_min, l_min, access_s1, access_s2, i_flag, j_flag);
}
/**
*** find best dangles combination
**/
int dx_5, dx_3, dy_5, dy_3, dGx, dGy, bonus_x;
dx_5 = 0;
dx_3 = 0;
dy_5 = 0;
dy_3 = 0;
dGx = 0;
dGy = 0;
bonus_x = 0;
/* x--------x */
/* |||||||| */
/* x--------x */
dGx = access_s1[i - k_min + 1][i_pos];
dx_3 = 0;
dx_5 = 0;
bonus_x = 0;
dGy = access_s2[l_min - j + 1][j_pos + (l_min - 1)];
mfe.tb = i_pos - 9 - i + k_min - 1 - dx_5;
mfe.te = i_pos - 9 - 1 + dx_3;
mfe.qb = j_pos - 9 - 1 - dy_5;
mfe.qe = j_pos + l_min - 3 - 9 + dy_3;
mfe.ddG = (double)Emin * 0.01;
mfe.dG1 = (double)dGx * 0.01;
mfe.dG2 = (double)dGy * 0.01;
mfe.energy = mfe.ddG - mfe.dG1 - mfe.dG2;
mfe.structure = struc;
for (i = 0; i <= n3; i++)
free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
return mfe;
}
PRIVATE char *
backtrack_XS(int i,
int j,
const int **access_s1,
const int **access_s2,
const int i_flag,
const int j_flag)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, type, type2, E, traced, i0, j0;
char *st1, *st2, *struc;
st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1));
i0 = i; /* MAX2(i-1,1);  */
j0 = j; /* MIN2(j+1,n4); */
while (i <= n3 - i_flag && j >= 1 + j_flag) {
E = c[i][j];
traced = 0;
st1[i - 1] = '(';
st2[j - 1] = ')';
type = pair[S1[i]][S2[j]];
if (!type)
vrna_message_error("backtrack failed in fold duplex bli");
for (k = i + 1; k <= n3 && k < i + MAXLOOP + 2; k++) {
for (l = j - 1; l >= 1; l--) {
int LE;
if (k - i + j - l - 2 > MAXLOOP)
break;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
LE = E_IntLoop(k - i - 1,
j - l - 1,
type,
rtype[type2],
SS1[i + 1],
SS2[j - 1],
SS1[k - 1],
SS2[l + 1],
P);
if (E == c[k][l] + LE) {
traced = 1;
i = k;
j = l;
break;
}
}
if (traced)
break;
}
if (!traced) {
#if 0
if (i < n3)
E -= P->dangle3[rtype[type]][SS1[i + 1]]; /* +access_s1[1][i+1]; */
if (j > 1)
E -= P->dangle5[rtype[type]][SS2[j - 1]]; /* +access_s2[1][j+1]; */
if (type > 2)
E -= P->TerminalAU;
#endif
E -= E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1], P);
/* the former sanity check (E == P->DuplexInit) was unreachable dead code and
* would fire spuriously anyway, since the correction above is not
* i_flag/j_flag aware (compare the initialization in duplexfold_XS);
* we simply stop the backtracking here */
break;
}
}
/* if (i<n3) i++; */
/* if (j>1) j--; */
struc = (char *)vrna_alloc(i - i0 + 1 + j0 - j + 1 + 2);
for (k = MAX2(i0, 1); k <= i; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j; k <= j0; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i0 - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j - 1);
free(st1);
free(st2);
return struc;
}
/**
*** fduplexfold(_XS) computes the interaction based on the plex energy model.
*** It is faster than the duplex approach, but not compliant with the standard energy model.
*** We use the standard matrices (c, in, etc.) because we backtrack.
**/
PRIVATE duplexT
fduplexfold_XS(const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int i_pos,
const int j_pos,
const int threshold,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
/**
*** i,j recursion index
*** Emin, i_min, j_min MFE position and energy
*** mfe struc duplex structure
**/
int i, j, Emin, i_min, j_min, l1;
duplexT mfe;
char *struc;
/**
*** bext=b_a bulge extension parameter for the linear model
*** iopen=il_b interior loop opening for the linear model
*** iext_s=2*il_a symmetric extension of an interior loop
*** iext_ass=50+il_a asymmetric extension of an interior loop
*** min_colonne=INF; best (minimal) score of a row
*** i_length; loop bound on the target
*** max_pos; position of the best hit during recursion on the target
*** max_pos_j; position of the best hit during recursion on the query
*** temp; temporary variable for min_colonne
*** min_j_colonne; position of the minimum on the query in row j
*** max=INF; absolute MFE
*** n3,n4 lengths of target and query
*** DJ contains the accessibility penalty for the query sequence
*** maxPenalty contains the maximum penalty
**/
int bopen = b_b;
int bext = b_a;
int iopen = il_b;
int iext_s = 2 * il_a; /* iext_s: symmetric 2-nt extension of an interior loop, on both the i and j side */
int iext_ass = 50 + il_a; /* iext_ass: asymmetric extension of an interior loop, either on the i or on the j side */
int min_colonne = INF; /* holds the best (minimal) score of a column */
int i_length;
int max_pos; /* get position of the best hit */
int max_pos_j;
int temp = INF;
int min_j_colonne;
int max = INF;
int **DJ;
int maxPenalty[4];
vrna_md_t md;
/**
*** variable initialization
**/
n3 = (int)strlen(s1);
n4 = (int)strlen(s2);
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
update_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
/**
*** array initialization
**/
c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
in = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
bx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
by = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
inx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
iny = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
/* #pragma omp parallel for */
for (i = 0; i <= n3; i++) {
c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
in[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
bx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
by[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
inx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
iny[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
}
for (i = 0; i < n3; i++) {
for (j = 0; j < n4; j++) {
in[i][j] = INF; /* no in before 1 */
c[i][j] = INF; /* no bulge and no in before n2 */
bx[i][j] = INF; /* no bulge before 1 */
by[i][j] = INF;
inx[i][j] = INF; /* no bulge before 1 */
iny[i][j] = INF;
}
}
/**
*** sequence encoding
**/
encode_seqs(s1, s2);
/**
*** Compute max accessibility penalty for the query only once
**/
maxPenalty[0] = (int)-1 * P->stack[2][2] / 2;
maxPenalty[1] = (int)-1 * P->stack[2][2];
maxPenalty[2] = (int)-3 * P->stack[2][2] / 2;
maxPenalty[3] = (int)-2 * P->stack[2][2];
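/*
* Editorial note (assumption): P->stack[2][2] holds the (negative) GC/GC
* stacking energy, so maxPenalty[k] caps the accessibility penalty for
* opening k+1 extra positions at (k+1)/2 times the magnitude of that
* stacking gain. The corresponding MIN2() capping is currently disabled
* (see the commented-out lines in the loop below).
*/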
DJ = (int **)vrna_alloc(4 * sizeof(int *));
DJ[0] = (int *)vrna_alloc((1 + n4) * sizeof(int));
DJ[1] = (int *)vrna_alloc((1 + n4) * sizeof(int));
DJ[2] = (int *)vrna_alloc((1 + n4) * sizeof(int));
DJ[3] = (int *)vrna_alloc((1 + n4) * sizeof(int));
j = n4 - 9;
while (--j > 9) {
int jdiff = j_pos + j - 11;
/**
*** Depending on which direction (i: 1->n vs. j: m->1) the accessibility is computed, we get slightly different results.
*** We reduce the discrepancies by taking the average of d^i_k and d^j_l
**/
DJ[0][j] = 0.5 *
(access_s2[5][jdiff + 4] - access_s2[4][jdiff + 4] + access_s2[5][jdiff] -
access_s2[4][jdiff - 1]);
DJ[1][j] = 0.5 *
(access_s2[5][jdiff + 5] - access_s2[4][jdiff + 5] + access_s2[5][jdiff + 1] -
access_s2[4][jdiff]) + DJ[0][j];
DJ[2][j] = 0.5 *
(access_s2[5][jdiff + 6] - access_s2[4][jdiff + 6] + access_s2[5][jdiff + 2] -
access_s2[4][jdiff + 1]) + DJ[1][j];
DJ[3][j] = 0.5 *
(access_s2[5][jdiff + 7] - access_s2[4][jdiff + 7] + access_s2[5][jdiff + 3] -
access_s2[4][jdiff + 2]) + DJ[2][j];
/*
* DJ[0][j] = access_s2[5][jdiff+4] - access_s2[4][jdiff+4] ;
* DJ[1][j] = access_s2[5][jdiff+5] - access_s2[4][jdiff+5] + DJ[0][j];
* DJ[2][j] = access_s2[5][jdiff+6] - access_s2[4][jdiff+6] + DJ[1][j];
* DJ[3][j] = access_s2[5][jdiff+7] - access_s2[4][jdiff+7] + DJ[2][j];
* DJ[0][j] = MIN2(DJ[0][j],maxPenalty[0]);
* DJ[1][j] = MIN2(DJ[1][j],maxPenalty[1]);
* DJ[2][j] = MIN2(DJ[2][j],maxPenalty[2]);
* DJ[3][j] = MIN2(DJ[3][j],maxPenalty[3]);
*/
}
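/*
* Worked reading of the averaging above, assuming access_s2[l][p] stores the
* opening energy of the length-l window ending at position p (an editorial
* assumption): each difference of the form
*   access_s2[5][p] - access_s2[4][p]
* estimates the marginal cost of opening one additional nucleotide. For
* DJ[0][j] the cost of opening position jdiff is estimated twice, once from
* the window extending to its right and once from the window extending to
* its left, and the two estimates are averaged; DJ[1..3][j] accumulate the
* same averaged estimate for 2, 3 and 4 additional positions.
*/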
/**
*** Start of the recursion
*** The first and last 10 nucleotides of target and query are dummy nucleotides,
*** which allows us to reduce the number of boundary tests
**/
i = 11;
i_length = n3 - 9;
while (i < i_length) {
int di1, di2, di3, di4;
int idiff = i_pos - (n3 - 10 - i);
di1 = 0.5 *
(access_s1[5][idiff + 4] - access_s1[4][idiff + 4] + access_s1[5][idiff] -
access_s1[4][idiff - 1]);
di2 = 0.5 *
(access_s1[5][idiff + 3] - access_s1[4][idiff + 3] + access_s1[5][idiff - 1] -
access_s1[4][idiff - 2]) + di1;
di3 = 0.5 *
(access_s1[5][idiff + 2] - access_s1[4][idiff + 2] + access_s1[5][idiff - 2] -
access_s1[4][idiff - 3]) + di2;
di4 = 0.5 *
(access_s1[5][idiff + 1] - access_s1[4][idiff + 1] + access_s1[5][idiff - 3] -
access_s1[4][idiff - 4]) + di3;
/*
* di1 = access_s1[5][idiff] - access_s1[4][idiff-1];
* di2 = access_s1[5][idiff-1] - access_s1[4][idiff-2] + di1;
* di3 = access_s1[5][idiff-2] - access_s1[4][idiff-3] + di2;
* di4 = access_s1[5][idiff-3] - access_s1[4][idiff-4] + di3;
* di1=MIN2(di1,maxPenalty[0]);
* di2=MIN2(di2,maxPenalty[1]);
* di3=MIN2(di3,maxPenalty[2]);
* di4=MIN2(di4,maxPenalty[3]);
*/
j = n4 - 9;
min_colonne = INF;
while (10 < --j) {
int dj1, dj2, dj3, dj4;
int jdiff = j_pos + j - 11;
dj1 = DJ[0][j];
dj2 = DJ[1][j];
dj3 = DJ[2][j];
dj4 = DJ[3][j];
int type, type2;
type = pair[S1[i]][S2[j]];
/**
*** Start duplex
**/
/*
* c[i][j]=type ? P->DuplexInit + access_s1[1][idiff]+access_s2[1][jdiff] : INF;
*/
c[i][j] = type ? P->DuplexInit : INF;
/**
*** update the in, bx, by, inx, iny matrices
**/
type2 = pair[S2[j + 1]][S1[i - 1]];
/**
*** start/extend interior loop
**/
in[i][j] = MIN2(
c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1,
in[i - 1][j] + iext_ass + di1);
/**
*** start/extend nx1 target
*** use same type2 as for in
**/
inx[i][j] = MIN2(
c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1,
inx[i - 1][j] + iext_ass + di1);
/**
*** start/extend 1xn target
*** use same type2 as for in
**/
iny[i][j] = MIN2(
c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1,
iny[i][j + 1] + iext_ass + dj1);
/**
*** extend interior loop
**/
in[i][j] = MIN2(in[i][j], in[i][j + 1] + iext_ass + dj1);
in[i][j] = MIN2(in[i][j], in[i - 1][j + 1] + iext_s + di1 + dj1);
/**
*** start/extend bulge target
**/
type2 = pair[S2[j]][S1[i - 1]];
bx[i][j] =
MIN2(bx[i - 1][j] + bext + di1,
c[i - 1][j] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + di1);
/**
*** start/extend bulge query
**/
type2 = pair[S2[j + 1]][S1[i]];
by[i][j] =
MIN2(by[i][j + 1] + bext + dj1,
c[i][j + 1] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + dj1);
/**
***end update recursion
***######################## Start stack extension##############################
**/
if (!type)
continue;
c[i][j] += E_ExtLoop(type, SS1[i - 1], SS2[j + 1], P);
/**
*** stack extension
**/
if ((type2 = pair[S1[i - 1]][S2[j + 1]]))
c[i][j] = MIN2(c[i - 1][j + 1] + P->stack[rtype[type]][type2] + di1 + dj1, c[i][j]);
/**
*** 1x0 / 0x1 stack extension
**/
if ((type2 = pair[S1[i - 1]][S2[j + 2]]))
c[i][j] = MIN2(c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + di1 + dj2,
c[i][j]);
if ((type2 = pair[S1[i - 2]][S2[j + 1]]))
c[i][j] = MIN2(c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + di2 + dj1,
c[i][j]);
/**
*** 1x1 / 2x2 stack extension
**/
if ((type2 = pair[S1[i - 2]][S2[j + 2]]))
c[i][j] = MIN2(
c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj2,
c[i][j]);
if ((type2 = pair[S1[i - 3]][S2[j + 3]])) {
c[i][j] =
MIN2(c[i - 3][j + 3] +
P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di3 + dj3,
c[i][j]);
}
/**
*** 1x2 / 2x1 stack extension
*** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to
*** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]]
**/
if ((type2 = pair[S1[i - 3]][S2[j + 2]])) {
c[i][j] =
MIN2(
c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + di3 + dj2,
c[i][j]);
}
if ((type2 = pair[S1[i - 2]][S2[j + 3]])) {
c[i][j] =
MIN2(
c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di2 + dj3,
c[i][j]);
}
/**
*** 2x3 / 3x2 stack extension
**/
if ((type2 = pair[S1[i - 4]][S2[j + 3]]))
c[i][j] = MIN2(c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di4 + dj3, c[i][j]);
if ((type2 = pair[S1[i - 3]][S2[j + 4]]))
c[i][j] = MIN2(c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di3 + dj4, c[i][j]);
/**
*** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3
**/
/**
*** 3x3 or more
**/
c[i][j] = MIN2(
in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + di3 + dj3,
c[i][j]);
/**
*** 2xn or more
**/
c[i][j] = MIN2(
in[i - 4][j + 2] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di4 + dj2,
c[i][j]);
/**
*** nx2 or more
**/
c[i][j] = MIN2(
in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di2 + dj4,
c[i][j]);
/**
*** nx1 n>2
**/
c[i][j] = MIN2(
inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di3 + dj1,
c[i][j]);
/**
*** 1xn n>2
**/
c[i][j] = MIN2(
iny[i - 1][j + 3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + dj3 + di1,
c[i][j]);
/**
*** nx0 n>1
**/
int bAU;
bAU = (type > 2 ? P->TerminalAU : 0);
c[i][j] = MIN2(bx[i - 2][j + 1] + di2 + dj1 + bext + bAU, c[i][j]);
/**
*** 0xn n>1
**/
c[i][j] = MIN2(by[i - 1][j + 2] + di1 + dj2 + bext + bAU, c[i][j]);
temp = min_colonne;
min_colonne = MIN2(c[i][j] + E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1], P), min_colonne);
if (temp > min_colonne)
min_j_colonne = j;
/* ---------------------------------------------------------------------end update */
}
if (max >= min_colonne) {
max = min_colonne;
max_pos = i;
max_pos_j = min_j_colonne;
}
i++;
}
Emin = max;
if (Emin > threshold) {
free(S1);
free(S2);
free(SS1);
free(SS2);
for (i = 0; i <= n3; i++) {
free(c[i]);
free(in[i]);
free(bx[i]);
free(by[i]);
free(inx[i]);
free(iny[i]);
}
for (i = 0; i <= 3; i++)
free(DJ[i]);
free(c);
free(in);
free(bx);
free(by);
free(inx);
free(iny);
free(DJ);
mfe.energy = 0;
mfe.structure = NULL;
return mfe;
}
i_min = max_pos;
j_min = max_pos_j;
int dGe, dGeplex, dGx, dGy;
dGe = dGeplex = dGx = dGy = 0;
/* printf("MAX fduplexfold_XS %d\n",Emin); */
struc = fbacktrack_XS(i_min,
j_min,
access_s1,
access_s2,
i_pos,
j_pos,
il_a,
il_b,
b_a,
b_b,
&dGe,
&dGeplex,
&dGx,
&dGy);
l1 = strchr(struc, '&') - struc;
int size;
size = strlen(struc) - 1;
int lengthx;
int endx;
int lengthy;
int endy;
lengthx = l1;
lengthx -= (struc[0] == '.' ? 1 : 0);
lengthx -= (struc[l1 - 1] == '.' ? 1 : 0);
endx = (i_pos - (n3 - i_min));
lengthy = size - l1;
lengthy -= (struc[size] == '.' ? 1 : 0);
lengthy -= (struc[l1 + 1] == '.' ? 1 : 0);
endy = j_pos + j_min + lengthy - 22;
if (i_min < n3 - 10)
i_min++;
if (j_min > 11)
j_min--;
mfe.i = i_min;
mfe.j = j_min;
mfe.ddG = (double)Emin * 0.01;
mfe.structure = struc;
mfe.energy_backtrack = (double)dGe * 0.01;
mfe.energy = (double)dGeplex * 0.01;
mfe.opening_backtrack_x = (double)dGx * 0.01;
mfe.opening_backtrack_y = (double)dGy * 0.01;
mfe.dG1 = 0; /* !remove access to complete access array (double) access_s1[lengthx][endx+10] * 0.01; */
mfe.dG2 = 0; /* !remove access to complete access array (double) access_s2[lengthy][endy+10] * 0.01; */
free(S1);
free(S2);
free(SS1);
free(SS2);
for (i = 0; i <= n3; i++) {
free(c[i]);
free(in[i]);
free(bx[i]);
free(by[i]);
free(inx[i]);
free(iny[i]);
}
for (i = 0; i <= 3; i++)
free(DJ[i]);
free(DJ);
free(c);
free(in);
free(bx);
free(by);
free(iny);
free(inx);
return mfe;
}
PRIVATE char *
fbacktrack_XS(int i,
int j,
const int **access_s1,
const int **access_s2,
const int i_pos,
const int j_pos,
const int il_a,
const int il_b,
const int b_a,
const int b_b,
int *dG,
int *dGplex,
int *dGx,
int *dGy)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, type, type2, E, traced, i0, j0;
char *st1, *st2, *struc;
int bopen = b_b;
int bext = b_a;
int iopen = il_b;
int iext_s = 2 * il_a; /* iext_s: symmetric 2-nt extension of an interior loop, on both the i and j side */
int iext_ass = 50 + il_a; /* iext_ass: asymmetric extension of an interior loop, either on the i or on the j side */
st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1));
i0 = MIN2(i + 1, n3 - 10);
j0 = MAX2(j - 1, 11);
int state;
state = 1; /* we start backtracking from a base pair, i.e. the c matrix */
/* state 1 -> base pair, c
* state 2 -> interior loop, in
* state 3 -> bulge on target, bx
* state 4 -> bulge on query, by
* state 5 -> nx1 interior loop, inx
* state 6 -> 1xn interior loop, iny
*/
traced = 1;
k = i;
l = j; /* store the i,j information for subsequent use */
int idiff, jdiff;
/**
*** (type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]];
**/
int maxPenalty[4];
vrna_md_t md;
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
update_dfold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
maxPenalty[0] = (int)-1 * P->stack[2][2] / 2;
maxPenalty[1] = (int)-1 * P->stack[2][2];
maxPenalty[2] = (int)-3 * P->stack[2][2] / 2;
maxPenalty[3] = (int)-2 * P->stack[2][2];
type = pair[S1[i]][S2[j]];
*dG += E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1], P);
*dGplex = *dG;
while (i > 10 && j <= n4 - 9 && traced) {
int di1, di2, di3, di4;
idiff = i_pos - (n3 - 10 - i);
di1 = 0.5 *
(access_s1[5][idiff + 4] - access_s1[4][idiff + 4] + access_s1[5][idiff] -
access_s1[4][idiff - 1]);
di2 = 0.5 *
(access_s1[5][idiff + 3] - access_s1[4][idiff + 3] + access_s1[5][idiff - 1] -
access_s1[4][idiff - 2]) + di1;
di3 = 0.5 *
(access_s1[5][idiff + 2] - access_s1[4][idiff + 2] + access_s1[5][idiff - 2] -
access_s1[4][idiff - 3]) + di2;
di4 = 0.5 *
(access_s1[5][idiff + 1] - access_s1[4][idiff + 1] + access_s1[5][idiff - 3] -
access_s1[4][idiff - 4]) + di3;
/*
* di1 = access_s1[5][idiff] - access_s1[4][idiff-1];
* di2 = access_s1[5][idiff-1] - access_s1[4][idiff-2] + di1;
* di3 = access_s1[5][idiff-2] - access_s1[4][idiff-3] + di2;
* di4 = access_s1[5][idiff-3] - access_s1[4][idiff-4] + di3;
* di1=MIN2(di1,maxPenalty[0]);
* di2=MIN2(di2,maxPenalty[1]);
* di3=MIN2(di3,maxPenalty[2]);
* di4=MIN2(di4,maxPenalty[3]);
*/
int dj1, dj2, dj3, dj4;
jdiff = j_pos + j - 11;
dj1 = 0.5 *
(access_s2[5][jdiff + 4] - access_s2[4][jdiff + 4] + access_s2[5][jdiff] -
access_s2[4][jdiff - 1]);
dj2 = 0.5 *
(access_s2[5][jdiff + 5] - access_s2[4][jdiff + 5] + access_s2[5][jdiff + 1] -
access_s2[4][jdiff]) + dj1;
dj3 = 0.5 *
(access_s2[5][jdiff + 6] - access_s2[4][jdiff + 6] + access_s2[5][jdiff + 2] -
access_s2[4][jdiff + 1]) + dj2;
dj4 = 0.5 *
(access_s2[5][jdiff + 7] - access_s2[4][jdiff + 7] + access_s2[5][jdiff + 3] -
access_s2[4][jdiff + 2]) + dj3;
/*
* dj1 = access_s2[5][jdiff+4] - access_s2[4][jdiff+4];
* dj2 = access_s2[5][jdiff+5] - access_s2[4][jdiff+5] + dj1;
* dj3 = access_s2[5][jdiff+6] - access_s2[4][jdiff+6] + dj2;
* dj4 = access_s2[5][jdiff+7] - access_s2[4][jdiff+7] + dj3;
* dj1=MIN2(dj1,maxPenalty[0]);
* dj2=MIN2(dj2,maxPenalty[1]);
* dj3=MIN2(dj3,maxPenalty[2]);
* dj4=MIN2(dj4,maxPenalty[3]);
*/
traced = 0;
switch (state) {
case 1:
type = pair[S1[i]][S2[j]];
int bAU;
bAU = (type > 2 ? P->TerminalAU : 0);
if (!type)
vrna_message_error("backtrack failed in fold duplex");
type2 = pair[S1[i - 1]][S2[j + 1]];
if (type2 && c[i][j] == (c[i - 1][j + 1] + P->stack[rtype[type]][type2] + di1 + dj1)) {
k = i - 1;
l = j + 1;
(*dG) += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di1;
*dGy += dj1;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 1]][S2[j + 2]];
if (type2 &&
c[i][j] == (c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + di1 + dj2)) {
k = i - 1;
l = j + 2;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di1;
*dGy += dj2;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 2]][S2[j + 1]];
if (type2 &&
c[i][j] == (c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + di2 + dj1)) {
k = i - 2;
l = j + 1;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di2;
*dGy += dj1;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 2]][S2[j + 2]];
if (type2 &&
c[i][j] ==
(c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj2)) {
k = i - 2;
l = j + 2;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di2;
*dGy += dj2;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 3]][S2[j + 3]];
if (type2 &&
c[i][j] ==
(c[i - 3][j + 3] +
P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di3 +
dj3)) {
k = i - 3;
l = j + 3;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di3;
*dGy += dj3;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 3]][S2[j + 2]];
if (type2 &&
c[i][j] ==
(c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] +
di3 +
dj2)) {
k = i - 3;
l = j + 2;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di3;
*dGy += dj2;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 2]][S2[j + 3]];
if (type2 &&
c[i][j] ==
(c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] +
di2 +
dj3)) {
k = i - 2;
l = j + 3;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di2;
*dGy += dj3;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 4]][S2[j + 3]];
if (type2 && c[i][j] == (c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di4 + dj3)) {
k = i - 4;
l = j + 3;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di4;
*dGy += dj3;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 3]][S2[j + 4]];
if (type2 && c[i][j] == (c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di3 + dj4)) {
k = i - 3;
l = j + 4;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di3;
*dGy += dj4;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
if (c[i][j] ==
(in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + di3 + dj3 + 2 *
iext_s)) {
k = i;
l = j;
*dGplex += P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s;
*dGx += di3;
*dGy += dj3;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 3;
j = j + 3;
state = 2;
traced = 1;
break;
}
if (c[i][j] ==
(in[i - 4][j + 2] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + di4 + dj2 +
iext_s +
2 * iext_ass)) {
k = i;
l = j;
*dGplex += P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass;
*dGx += di4;
*dGy += dj2;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 4;
j = j + 2;
state = 2;
traced = 1;
break;
}
if (c[i][j] ==
(in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj4 +
iext_s +
2 * iext_ass)) {
k = i;
l = j;
*dGplex += P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass;
*dGx += di2;
*dGy += dj4;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 2;
j = j + 4;
state = 2;
traced = 1;
break;
}
if (c[i][j] ==
(inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass +
iext_ass + di3 + dj1)) {
k = i;
l = j;
*dGplex += P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass +
di3 + dj1;
*dGx += di3;
*dGy += dj1;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 3;
j = j + 1;
state = 5;
traced = 1;
break;
}
if (c[i][j] ==
(iny[i - 1][j + 3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass +
iext_ass + di1 + dj3)) {
k = i;
l = j;
*dGplex += P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass +
di1 + dj3;
*dGx += di1;
*dGy += dj3;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 1;
j = j + 3;
state = 6;
traced = 1;
break;
}
if (c[i][j] == (bx[i - 2][j + 1] + di2 + dj1 + bext + bAU)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
*dGplex += bext + bAU;
*dGx += di2;
*dGy += dj1;
i = i - 2;
j = j + 1;
state = 3;
traced = 1;
break;
}
if (c[i][j] == (by[i - 1][j + 2] + di1 + dj2 + bext + bAU)) {
k = i;
l = j;
*dGplex += bext + bAU;
*dGx += di1;
*dGy += dj2;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 1;
j = j + 2;
state = 4;
traced = 1;
break;
}
break;
case 2:
if (in[i][j] == (in[i - 1][j + 1] + iext_s + di1 + dj1)) {
i--;
j++;
*dGplex += iext_s;
*dGx += di1;
*dGy += dj1;
state = 2;
traced = 1;
break;
}
if (in[i][j] == (in[i - 1][j] + iext_ass + di1)) {
i = i - 1;
*dGplex += iext_ass;
*dGx += di1;
state = 2;
traced = 1;
break;
}
if (in[i][j] == (in[i][j + 1] + iext_ass + dj1)) {
j++;
state = 2;
*dGy += dj1;
*dGplex += iext_ass;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i - 1]];
if (type2 &&
in[i][j] ==
(c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 + dj1)) {
*dGplex += P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s;
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di1;
*dGy += dj1;
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 3:
if (bx[i][j] == (bx[i - 1][j] + bext + di1)) {
i--;
*dGplex += bext;
*dGx += di1;
state = 3;
traced = 1;
break;
}
type2 = pair[S2[j]][S1[i - 1]];
if (type2 &&
bx[i][j] == (c[i - 1][j] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + di1)) {
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += bopen + bext + (type2 > 2 ? P->TerminalAU : 0);
*dGx += di1;
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 4:
if (by[i][j] == (by[i][j + 1] + bext + dj1)) {
j++;
*dGplex += bext;
*dGy += dj1;
state = 4;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i]];
if (type2 &&
by[i][j] == (c[i][j + 1] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0) + dj1)) {
int temp;
temp = k;
k = i;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGplex += bopen + bext + (type2 > 2 ? P->TerminalAU : 0);
*dGy += dj1;
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 5:
if (inx[i][j] == (inx[i - 1][j] + iext_ass + di1)) {
i--;
*dGplex += iext_ass;
*dGx += di1;
state = 5;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i - 1]];
if (type2 &&
inx[i][j] ==
(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 +
dj1)) {
*dGplex += P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s;
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di1;
*dGy += dj1;
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 6:
if (iny[i][j] == (iny[i][j + 1] + iext_ass + dj1)) {
j++;
*dGplex += iext_ass;
*dGy += dj1;
state = 6;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i - 1]];
if (type2 &&
iny[i][j] ==
(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s + di1 +
dj1)) {
*dGplex += P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s;
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
*dGx += di1;
*dGy += dj1;
i = k;
j = l;
state = 1;
traced = 1;
break;
}
}
}
if (!traced) {
idiff = i_pos - (n3 - 10 - i);
jdiff = j_pos + j - 11;
E = c[i][j];
/**
*** if (i>1) {E -= P->dangle5[type][SS1[i-1]]; *dG+=P->dangle5[type][SS1[i-1]];*dGplex+=P->dangle5[type][SS1[i-1]];}
*** if (j<n4){E -= P->dangle3[type][SS2[j+1]]; *dG+=P->dangle3[type][SS2[j+1]];*dGplex+=P->dangle3[type][SS2[j+1]];}
*** if (type>2) {E -= P->TerminalAU; *dG+=P->TerminalAU;*dGplex+=P->TerminalAU;}
**/
int correction;
correction = E_ExtLoop(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? SS2[j + 1] : -1, P);
*dG += correction;
*dGplex += correction;
E -= correction;
/*
* if (E != P->DuplexInit+access_s1[1][idiff]+access_s2[1][jdiff]) {
* vrna_message_error("backtrack failed in second fold duplex");
* }
*/
if (E != P->DuplexInit) {
vrna_message_error("backtrack failed in second fold duplex");
} else {
*dG += P->DuplexInit;
*dGplex += P->DuplexInit;
*dGx += 0; /* access_s1[1][idiff]; */
*dGy += 0; /* access_s2[1][jdiff]; */
st1[i - 1] = '(';
st2[j - 1] = ')';
}
}
if (i > 11)
i--;
if (j < n4 - 10)
j++;
struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2);
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j0; k <= j; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j0 - 1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1);
free(st2);
return struc;
}
duplexT **
Lduplexfold_XS(const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int threshold,
const int alignment_length,
const int delta,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
/**
*** See variable definition in fduplexfold_XS
**/
int i, j;
int bopen = b_b;
int bext = b_a;
int iopen = il_b;
int iext_s = 2 * il_a;
int iext_ass = 50 + il_a;
int min_colonne = INF;
int i_length;
int max_pos;
int max_pos_j;
int min_j_colonne;
int max = INF;
int *position;
int *position_j;
int maxPenalty[4];
int **DJ;
/**
*** 1D array corresponding to the standard 2d recursion matrix
*** Makes the computation 20% faster
**/
int *SA;
vrna_md_t md;
/**
*** variable initialization
**/
n1 = (int)strlen(s1);
n2 = (int)strlen(s2);
/**
*** Sequence encoding
**/
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
update_dfold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
encode_seqs(s1, s2);
/**
*** Position of the high score on the target and query sequence
**/
position = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int));
position_j = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int));
/**
*** extension penalties, computed only once; this further reduces the computation time
**/
maxPenalty[0] = (int)-1 * P->stack[2][2] / 2;
maxPenalty[1] = (int)-1 * P->stack[2][2];
maxPenalty[2] = (int)-3 * P->stack[2][2] / 2;
maxPenalty[3] = (int)-2 * P->stack[2][2];
DJ = (int **)vrna_alloc(4 * sizeof(int *));
DJ[0] = (int *)vrna_alloc(n2 * sizeof(int));
DJ[1] = (int *)vrna_alloc(n2 * sizeof(int));
DJ[2] = (int *)vrna_alloc(n2 * sizeof(int));
DJ[3] = (int *)vrna_alloc(n2 * sizeof(int));
j = n2 - 9;
while (--j > 10) {
DJ[0][j] = 0.5 *
(access_s2[5][j + 4] - access_s2[4][j + 4] + access_s2[5][j] - access_s2[4][j - 1]);
DJ[1][j] = 0.5 *
(access_s2[5][j + 5] - access_s2[4][j + 5] + access_s2[5][j + 1] - access_s2[4][j]) +
DJ[0][j];
DJ[2][j] = 0.5 *
(access_s2[5][j + 6] - access_s2[4][j + 6] + access_s2[5][j + 2] -
access_s2[4][j + 1]) +
DJ[1][j];
DJ[3][j] = 0.5 *
(access_s2[5][j + 7] - access_s2[4][j + 7] + access_s2[5][j + 3] -
access_s2[4][j + 2]) +
DJ[2][j];
/*
* DJ[0][j] = access_s2[5][j+4] - access_s2[4][j+4] ;
* DJ[1][j] = access_s2[5][j+5] - access_s2[4][j+5] + DJ[0][j];
* DJ[2][j] = access_s2[5][j+6] - access_s2[4][j+6] + DJ[1][j];
* DJ[3][j] = access_s2[5][j+7] - access_s2[4][j+7] + DJ[2][j];
* DJ[0][j] = MIN2(DJ[0][j],maxPenalty[0]);
* DJ[1][j] = MIN2(DJ[1][j],maxPenalty[1]);
* DJ[2][j] = MIN2(DJ[2][j],maxPenalty[2]);
* DJ[3][j] = MIN2(DJ[3][j],maxPenalty[3]);
*/
}
/**
*** instead of six 2-dim arrays we use a single 1-dim array
*** The mapping 2D -> 1D is done based on the macros
*** LCI(i,j,l) ((i )*l + j)
*** LINI(i,j,l) ((i + 5)*l + j)
*** LBXI(i,j,l) ((i + 10)*l + j)
*** LBYI(i,j,l) ((i + 15)*l + j)
*** LINIX(i,j,l) ((i + 20)*l + j)
*** LINIY(i,j,l) ((i + 25)*l + j)
***
*** SA has a length of 5 (number of rows we look back) *
*** * 6 (number of matrices we look at) *
*** * length of the sequence
**/
SA = (int *)vrna_alloc(sizeof(int) * 5 * 6 * (n2 + 5));
for (j = n2 + 4; j >= 0; j--) {
int k;
/* initialize all six matrix slices (c, in, bx, by, inx, iny) of the
* 5-row ring buffer to INF */
for (k = 0; k < 30; k++)
SA[j * 30 + k] = INF;
}
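/*
* Editorial sketch of the ring-buffer indexing used below (assuming the
* LCI/LINI/... macros are defined as listed in the comment above): row i of
* each virtual 2-D matrix lives in slot i % 5, so looking back at most four
* rows requires only five stored rows per matrix.
*/
#if 0
int idx   = i % 5;       /* slot of row i   */
int idx_1 = (i - 1) % 5; /* slot of row i-1 */
/* a stack extension then reads the previous row through the same macro:
* SA[LCI(idx, j, n2)] = SA[LCI(idx_1, j + 1, n2)] + stack_term;
* where stack_term is a placeholder for the energy contribution. */
#endif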
i = 10;
i_length = n1 - 9;
while (i < i_length) {
int di1, di2, di3, di4;
int idx = i % 5;
int idx_1 = (i - 1) % 5;
int idx_2 = (i - 2) % 5;
int idx_3 = (i - 3) % 5;
int idx_4 = (i - 4) % 5;
di1 = 0.5 * (access_s1[5][i + 4] - access_s1[4][i + 4] + access_s1[5][i] - access_s1[4][i - 1]);
di2 = 0.5 *
(access_s1[5][i + 3] - access_s1[4][i + 3] + access_s1[5][i - 1] - access_s1[4][i - 2]) +
di1;
di3 = 0.5 *
(access_s1[5][i + 2] - access_s1[4][i + 2] + access_s1[5][i - 2] - access_s1[4][i - 3]) +
di2;
di4 = 0.5 *
(access_s1[5][i + 1] - access_s1[4][i + 1] + access_s1[5][i - 3] - access_s1[4][i - 4]) +
di3;
/*
* di1 = access_s1[5][i] - access_s1[4][i-1];
* di2 = access_s1[5][i-1] - access_s1[4][i-2] + di1;
* di3 = access_s1[5][i-2] - access_s1[4][i-3] + di2;
* di4 = access_s1[5][i-3] - access_s1[4][i-4] + di3;
* di1=MIN2(di1,maxPenalty[0]);
* di2=MIN2(di2,maxPenalty[1]);
* di3=MIN2(di3,maxPenalty[2]);
* di4=MIN2(di4,maxPenalty[3]);
*/
j = n2 - 9;
while (--j > 9) {
int dj1, dj2, dj3, dj4;
dj1 = DJ[0][j];
dj2 = DJ[1][j];
dj3 = DJ[2][j];
dj4 = DJ[3][j];
int type2, type, temp;
type = pair[S1[i]][S2[j]];
/**
*** Start duplex
**/
/* SA[LCI(idx,j,n2)] = type ? P->DuplexInit + access_s1[1][i] + access_s2[1][j] : INF; */
SA[LCI(idx, j, n2)] = type ? P->DuplexInit : INF;
/**
*** update the in, bx, by, inx and iny slices of SA
**/
type2 = pair[S2[j + 1]][S1[i - 1]];
/**
*** start/extend interior loop
**/
SA[LINI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->mismatchI[type2][SS2[j]][SS1[i]] + di1 + dj1 + iopen + iext_s,
SA[LINI(idx_1, j, n2)] + iext_ass + di1);
/**
*** start/extend nx1 target
*** use same type2 as for in
**/
SA[LINIX(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + di1 + dj1 + iopen + iext_s,
SA[LINIX(idx_1, j, n2)] + iext_ass + di1);
/**
*** start/extend 1xn target
*** use same type2 as for in
**/
SA[LINIY(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + di1 + dj1 + iopen + iext_s,
SA[LINIY(idx, j + 1, n2)] + iext_ass + dj1);
/**
*** extend interior loop
**/
SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)], SA[LINI(idx, j + 1, n2)] + iext_ass + dj1);
SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)],
SA[LINI(idx_1, j + 1, n2)] + iext_s + di1 + dj1);
/**
*** start/extend bulge target
**/
type2 = pair[S2[j]][S1[i - 1]];
SA[LBXI(idx, j, n2)] = MIN2(SA[LBXI(idx_1, j, n2)] + bext + di1,
SA[LCI(idx_1, j,
n2)] + bopen + bext +
(type2 > 2 ? P->TerminalAU : 0) + di1);
/**
*** start/extend bulge query
**/
type2 = pair[S2[j + 1]][S1[i]];
SA[LBYI(idx, j, n2)] = MIN2(SA[LBYI(idx, j + 1, n2)] + bext + dj1,
SA[LCI(idx, j + 1,
n2)] + bopen + bext +
(type2 > 2 ? P->TerminalAU : 0) + dj1);
/**
***end update recursion
**/
if (!type)
continue;
SA[LCI(idx, j, n2)] += E_ExtLoop(type, SS1[i - 1], SS2[j + 1], P);
/**
*** stack extension
**/
if ((type2 = pair[S1[i - 1]][S2[j + 1]]))
SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->stack[rtype[type]][type2] + di1 + dj1,
SA[LCI(idx, j, n2)]);
/**
*** 1x0 / 0x1 stack extension
**/
if ((type2 = pair[S1[i - 1]][S2[j + 2]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_1, j + 2,
n2)] + P->bulge[1] + P->stack[rtype[type]][type2] + di1 + dj2,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 2]][S2[j + 1]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_2, j + 1,
n2)] + P->bulge[1] + P->stack[type2][rtype[type]] + di2 + dj1,
SA[LCI(idx, j, n2)]);
}
/**
*** 1x1 / 2x2 stack extension
**/
if ((type2 = pair[S1[i - 2]][S2[j + 2]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_2, j + 2,
n2)] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + di2 + dj2,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 3]][S2[j + 3]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_3, j + 3,
n2)] +
P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j +
2]] + di3 + dj3,
SA[LCI(idx, j, n2)]);
}
/**
*** 1x2 / 2x1 stack extension
*** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to
*** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]]
**/
if ((type2 = pair[S1[i - 3]][S2[j + 2]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_3, j + 2,
n2)] +
P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + di3 + dj2,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 2]][S2[j + 3]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_2, j + 3,
n2)] +
P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + di2 + dj3,
SA[LCI(idx, j, n2)]);
}
/**
*** 2x3 / 3x2 stack extension
**/
if ((type2 = pair[S1[i - 4]][S2[j + 3]])) {
SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_4, j + 3, n2)] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di4 + dj3,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 3]][S2[j + 4]])) {
SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 4, n2)] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + di3 + dj4,
SA[LCI(idx, j, n2)]);
}
/**
*** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3
**/
/**
*** 3x3 or more
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINI(idx_3, j + 3,
n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + di3 + dj3,
SA[LCI(idx, j, n2)]);
/**
*** 2xn or more
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINI(idx_4, j + 2,
n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di4 + dj2,
SA[LCI(idx, j, n2)]);
/**
*** nx2 or more
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINI(idx_2, j + 4,
n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + di2 + dj4,
SA[LCI(idx, j, n2)]);
/**
*** nx1 n>2
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINIX(idx_3, j + 1,
n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + di3 + dj1,
SA[LCI(idx, j, n2)]);
/**
*** 1xn n>2
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINIY(idx_1, j + 3,
n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + dj3 + di1,
SA[LCI(idx, j, n2)]);
/**
*** nx0 n>1
**/
int bAU;
bAU = (type > 2 ? P->TerminalAU : 0);
SA[LCI(idx, j,
n2)] = MIN2(SA[LBXI(idx_2, j + 1, n2)] + di2 + dj1 + bext + bAU, SA[LCI(idx, j, n2)]);
/**
*** 0xn n>1
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LBYI(idx_1, j + 2, n2)] + di1 + dj2 + bext + bAU, SA[LCI(idx, j, n2)]);
temp = min_colonne;
/**
*** (type>2?P->TerminalAU:0)+
*** P->dangle3[rtype[type]][SS1[i+1]]+
*** P->dangle5[rtype[type]][SS2[j-1]],
**/
min_colonne = MIN2(SA[LCI(idx, j, n2)] + E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1], P),
min_colonne);
if (temp > min_colonne)
min_j_colonne = j;
/* ---------------------------------------------------------------------end update */
}
if (max >= min_colonne) {
max = min_colonne;
max_pos = i;
max_pos_j = min_j_colonne;
}
position[i + delta] = min_colonne;
min_colonne = INF;
position_j[i + delta] = min_j_colonne;
i++;
}
/* printf("MAX: %d",max); */
free(S1);
free(S2);
free(SS1);
free(SS2);
free(SA);
if (max < threshold) {
find_max_XS(position,
position_j,
delta,
threshold,
alignment_length,
s1,
s2,
access_s1,
access_s2,
fast,
il_a,
il_b,
b_a,
b_b);
}
if (max < INF) {
plot_max_XS(max,
max_pos,
max_pos_j,
alignment_length,
s1,
s2,
access_s1,
access_s2,
fast,
il_a,
il_b,
b_a,
b_b);
}
for (i = 0; i <= 3; i++)
free(DJ[i]);
free(DJ);
free(position);
free(position_j);
return NULL;
}
PRIVATE void
find_max_XS(const int *position,
const int *position_j,
const int delta,
const int threshold,
const int alignment_length,
const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
int pos = n1 - 9;
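/*
* Scan strategy (editorial summary): walk the target from the 3' towards the
* 5' end; whenever the per-position minimum beats the threshold, search the
* next `delta` positions for the locally best hit, report it, and then jump
* past a window of `delta` positions so that overlapping hits are suppressed.
*/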
if (fast == 1) {
while (10 < pos--) {
int temp_min = 0;
if (position[pos + delta] < (threshold)) {
int search_range;
search_range = delta + 1;
while (--search_range)
if (position[pos + delta - search_range] <= position[pos + delta - temp_min])
temp_min = search_range;
pos -= temp_min;
int max_pos_j;
max_pos_j = position_j[pos + delta];
int max;
max = position[pos + delta];
printf("target upper bound %d: query lower bound %d (%5.2f) \n",
pos - 10,
max_pos_j - 10,
((double)max) / 100);
pos = MAX2(10, pos + temp_min - delta);
}
}
} else if (fast == 2) {
pos = n1 - 9;
while (10 < pos--) {
int temp_min = 0;
if (position[pos + delta] < (threshold)) {
int search_range;
search_range = delta + 1;
while (--search_range)
if (position[pos + delta - search_range] <= position[pos + delta - temp_min])
temp_min = search_range;
pos -= temp_min;
int max_pos_j;
max_pos_j = position_j[pos + delta];
/* max_pos_j and pos correspond to the real positions
* in the extended sequence.
* pos=1 -> position 1 in the sequence (and not 0 as in C)
* max_pos_j -> position 1 in the sequence (and not 0 as in C)
*/
int alignment_length2;
alignment_length2 = MIN2(n1, n2);
int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */
int end_t = MIN2(n1 - 10, pos + 1);
int begin_q = MAX2(11, max_pos_j - 1); /* 10 */
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20));
strcpy(s3, "NNNNNNNNNN");
strcpy(s4, "NNNNNNNNNN");
strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1);
strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1);
strcat(s3, "NNNNNNNNNN");
strcat(s4, "NNNNNNNNNN");
s3[end_t - begin_t + 1 + 20] = '\0';
s4[end_q - begin_q + 1 + 20] = '\0';
duplexT test;
test = fduplexfold_XS(s3,
s4,
access_s1,
access_s2,
end_t,
begin_q,
threshold,
il_a,
il_b,
b_a,
b_b);
if (test.energy * 100 < threshold) {
int l1 = strchr(test.structure, '&') - test.structure;
printf(
" %s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n",
test.structure,
begin_t - 10 + test.i - l1 - 10,
begin_t - 10 + test.i - 1 - 10,
begin_q - 10 + test.j - 1 - 10,
(begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10,
test.ddG,
test.energy,
test.opening_backtrack_x,
test.opening_backtrack_y,
test.energy_backtrack,
pos - 10,
max_pos_j - 10,
((double)position[pos + delta]) / 100);
pos = MAX2(10, pos + temp_min - delta);
free(test.structure);
}
free(s3);
free(s4);
}
}
} else {
pos = n1 - 9;
while (pos-- > 10) {
int temp_min = 0;
if (position[pos + delta] < (threshold)) {
int search_range;
search_range = delta + 1;
while (--search_range)
if (position[pos + delta - search_range] <= position[pos + delta - temp_min])
temp_min = search_range;
pos -= temp_min; /* position on i */
int max_pos_j;
max_pos_j = position_j[pos + delta]; /* position on j */
int begin_t = MAX2(11, pos - alignment_length);
int end_t = MIN2(n1 - 10, pos + 1);
int begin_q = MAX2(11, max_pos_j - 1);
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length - 1);
int i_flag;
int j_flag;
i_flag = (end_t == pos + 1 ? 1 : 0);
j_flag = (begin_q == max_pos_j - 1 ? 1 : 0);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2));
strncpy(s3, (s1 + begin_t), end_t - begin_t + 1);
strncpy(s4, (s2 + begin_q), end_q - begin_q + 1);
s3[end_t - begin_t + 1] = '\0';
s4[end_q - begin_q + 1] = '\0';
duplexT test;
test =
duplexfold_XS(s3, s4, access_s1, access_s2, pos, max_pos_j, threshold, i_flag, j_flag);
if (test.energy * 100 < threshold) {
printf("%s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) i:%d,j:%d <%5.2f>\n",
test.structure,
test.tb,
test.te,
test.qb,
test.qe,
test.ddG,
test.energy,
test.dG1,
test.dG2,
pos - 10,
max_pos_j - 10,
((double)position[pos + delta]) / 100);
pos = MAX2(10, pos + temp_min - delta);
}
free(s3);
free(s4);
free(test.structure);
}
}
}
}
#if 0
PRIVATE int
compare(const void *sub1,
const void *sub2)
{
int d;
if (((duplexT *)sub1)->ddG > ((duplexT *)sub2)->ddG)
return 1;
if (((duplexT *)sub1)->ddG < ((duplexT *)sub2)->ddG)
return -1;
d = ((duplexT *)sub1)->i - ((duplexT *)sub2)->i;
if (d != 0)
return d;
return ((duplexT *)sub1)->j - ((duplexT *)sub2)->j;
}
#endif
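/*
* Editorial note: were the disabled comparator above re-enabled, it could
* order an array of hits by ddG and then by position in the usual way, e.g.
*
*   qsort(hits, n_hits, sizeof(duplexT), compare);
*
* where hits/n_hits are hypothetical names for a hit list and its length.
*/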
PRIVATE void
plot_max_XS(const int max,
const int max_pos,
const int max_pos_j,
const int alignment_length,
const char *s1,
const char *s2,
const int **access_s1,
const int **access_s2,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
if (fast == 1) {
printf("target upper bound %d: query lower bound %d (%5.2f)\n", max_pos - 3, max_pos_j,
((double)max) / 100);
} else if (fast == 2) {
int alignment_length2;
alignment_length2 = MIN2(n1, n2);
int begin_t = MAX2(11, max_pos - alignment_length2 + 1); /* 10 */
int end_t = MIN2(n1 - 10, max_pos + 1);
int begin_q = MAX2(11, max_pos_j - 1); /* 10 */
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20));
strcpy(s3, "NNNNNNNNNN");
strcpy(s4, "NNNNNNNNNN");
strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1);
strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1);
strcat(s3, "NNNNNNNNNN");
strcat(s4, "NNNNNNNNNN");
s3[end_t - begin_t + 1 + 20] = '\0';
s4[end_q - begin_q + 1 + 20] = '\0';
duplexT test;
test = fduplexfold_XS(s3, s4, access_s1, access_s2, end_t, begin_q, INF, il_a, il_b, b_a, b_b);
int l1 = strchr(test.structure, '&') - test.structure;
printf("%s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n",
test.structure,
begin_t - 10 + test.i - l1 - 10,
begin_t - 10 + test.i - 1 - 10,
begin_q - 10 + test.j - 1 - 10,
(begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10,
test.ddG,
test.energy,
test.opening_backtrack_x,
test.opening_backtrack_y,
test.energy_backtrack,
max_pos - 10,
max_pos_j - 10,
(double)max / 100);
free(s3);
free(s4);
free(test.structure);
} else {
int begin_t = MAX2(11, max_pos - alignment_length);
int end_t = MIN2(n1 - 10, max_pos + 1);
int begin_q = MAX2(11, max_pos_j - 1);
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length - 1);
int i_flag;
int j_flag;
i_flag = (end_t == max_pos + 1 ? 1 : 0);
j_flag = (begin_q == max_pos_j - 1 ? 1 : 0);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2)); /* +1 for \0 +1 for distance */
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2));
strncpy(s3, (s1 + begin_t - 1), end_t - begin_t + 1); /* -1 to go from 1-based to 0-based indexing */
strncpy(s4, (s2 + begin_q - 1), end_q - begin_q + 1); /* -1 to go from 1-based to 0-based indexing */
s3[end_t - begin_t + 1] = '\0';
s4[end_q - begin_q + 1] = '\0';
duplexT test;
test = duplexfold_XS(s3, s4, access_s1, access_s2, max_pos, max_pos_j, INF, i_flag, j_flag);
printf("%s %3d,%-3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f) i:%d,j:%d <%5.2f>\n",
test.structure,
test.tb,
test.te,
test.qb,
test.qe,
test.ddG,
test.energy,
test.dG1,
test.dG2,
max_pos - 10,
max_pos_j - 10,
(double)max / 100);
free(s3);
free(s4);
free(test.structure);
}
}
/*---------------------------------------------------------duplexfold----------------------------------------------------------------------------------*/
PRIVATE duplexT
duplexfold(const char *s1,
const char *s2,
const int extension_cost)
{
int i, j, l1, Emin = INF, i_min = 0, j_min = 0;
char *struc;
duplexT mfe;
vrna_md_t md;
n3 = (int)strlen(s1);
n4 = (int)strlen(s2);
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
update_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
for (i = 0; i <= n3; i++)
c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
encode_seqs(s1, s2);
for (i = 1; i <= n3; i++) {
for (j = n4; j > 0; j--) {
int type, type2, E, k, l;
type = pair[S1[i]][S2[j]];
c[i][j] = type ? P->DuplexInit + 2 * extension_cost : INF;
if (!type)
continue;
/**
*** if (i>1) c[i][j] += P->dangle5[type][SS1[i-1]]+ extension_cost;
*** if (j<n4) c[i][j] += P->dangle3[type][SS2[j+1]]+ extension_cost;
*** if (type>2) c[i][j] += P->TerminalAU;
**/
c[i][j] += E_ExtLoop(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? SS2[j + 1] : -1, P);
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n4; l++) {
if (i - k + l - j - 2 > MAXLOOP)
break;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1],
P) + (i - k + l - j) * extension_cost;
c[i][j] = MIN2(c[i][j], c[k][l] + E);
}
}
E = c[i][j];
/**
*** if (i<n3) E += P->dangle3[rtype[type]][SS1[i+1]]+extension_cost;
*** if (j>1) E += P->dangle5[rtype[type]][SS2[j-1]]+extension_cost;
*** if (type>2) E += P->TerminalAU;
***
**/
E += E_ExtLoop(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n3) ? SS1[i + 1] : -1, P);
if (E < Emin) {
Emin = E;
i_min = i;
j_min = j;
}
}
}
struc = backtrack(i_min, j_min, extension_cost);
if (i_min < n3)
i_min++;
if (j_min > 1)
j_min--;
l1 = strchr(struc, '&') - struc;
int size;
size = strlen(struc) - 1;
Emin -= size * (extension_cost);
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (double)Emin / 100.;
mfe.structure = struc;
for (i = 0; i <= n3; i++)
free(c[i]);
free(c);
free(S1);
free(S2);
free(SS1);
free(SS2);
return mfe;
}
PRIVATE char *
backtrack(int i,
int j,
const int extension_cost)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, type, type2, E, traced, i0, j0;
char *st1, *st2, *struc;
st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1));
i0 = MIN2(i + 1, n3);
j0 = MAX2(j - 1, 1);
while (i > 0 && j <= n4) {
E = c[i][j];
traced = 0;
st1[i - 1] = '(';
st2[j - 1] = ')';
type = pair[S1[i]][S2[j]];
if (!type)
vrna_message_error("backtrack failed in fold duplex");
for (k = i - 1; k > 0 && k > i - MAXLOOP - 2; k--) {
for (l = j + 1; l <= n4; l++) {
int LE;
if (i - k + l - j - 2 > MAXLOOP)
break;
type2 = pair[S1[k]][S2[l]];
if (!type2)
continue;
LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type],
SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1],
P) + (i - k + l - j) * extension_cost;
if (E == c[k][l] + LE) {
traced = 1;
i = k;
j = l;
break;
}
}
if (traced)
break;
}
if (!traced) {
E -= E_ExtLoop(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? SS2[j + 1] : -1, P);
/**
*** if (i>1) E -= P->dangle5[type][SS1[i-1]]+extension_cost;
*** if (j<n4) E -= P->dangle3[type][SS2[j+1]]+extension_cost;
*** if (type>2) E -= P->TerminalAU;
**/
if (E != P->DuplexInit + 2 * extension_cost)
vrna_message_error("backtrack failed in fold duplex");
else
break;
}
}
if (i > 1)
i--;
if (j < n4)
j++;
struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2);
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j0; k <= j; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j0 - 1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1);
free(st2);
return struc;
}
PRIVATE duplexT
fduplexfold(const char *s1,
const char *s2,
const int extension_cost,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
int i, j, Emin, i_min, j_min, l1;
duplexT mfe;
char *struc;
int bopen = b_b;
int bext = b_a + extension_cost;
int iopen = il_b;
int iext_s = 2 * (il_a + extension_cost); /* iext_s: symmetric 2-nt extension of an interior loop, on both the i and j side */
int iext_ass = 50 + il_a + extension_cost; /* iext_ass: asymmetric extension of an interior loop, either on the i or on the j side */
int min_colonne = INF; /* holds the best (minimal) score of a column */
int i_length;
int max_pos; /* get position of the best hit */
int max_pos_j;
int temp = INF;
int min_j_colonne;
int max = INF;
vrna_md_t md;
/* The following lines define an array containing the positions of the subopts in s1 */
n3 = (int)strlen(s1);
n4 = (int)strlen(s2);
/* delta_check is the minimal distance allowed between two accepted hits */
/* if two hits are closer, reject the smaller (in terms of position) one */
/* TODO: implement a function that, given a position in a long sequence and a short sequence, */
/* duplexfolds them at this position and reports the result on the command line; */
/* for this, backtrack first needs to be rewritten so that the printf calls are removed */
/* END OF DEFINITION FOR NEEDED SUBOPT DATA */
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
update_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
/*local c array initialization---------------------------------------------*/
c = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
in = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
bx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
by = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
inx = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
iny = (int **)vrna_alloc(sizeof(int *) * (n3 + 1));
for (i = 0; i <= n3; i++) {
c[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
in[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
bx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
by[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
inx[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
iny[i] = (int *)vrna_alloc(sizeof(int) * (n4 + 1));
}
/*-------------------------------------------------------------------------*/
/*end of array initialisation----------------------------------*/
/*maybe int *** would be better*/
encode_seqs(s1, s2);
/* ------------------------------------------matrix initialization */
for (i = 0; i < n3; i++) {
for (j = 0; j < n4; j++) {
in[i][j] = INF; /* no in before 1 */
c[i][j] = INF; /* no bulge and no in before n2 */
bx[i][j] = INF; /* no bulge before 1 */
by[i][j] = INF;
inx[i][j] = INF; /* no bulge before 1 */
iny[i][j] = INF;
}
}
/*--------------------------------------------------------local array*/
/* -------------------------------------------------------------matrix initialization */
i = 11;
i_length = n3 - 9;
while (i < i_length) {
j = n4 - 9;
min_colonne = INF;
while (10 < --j) {
int type, type2;
type = pair[S1[i]][S2[j]];
/**
*** Start duplex
**/
c[i][j] = type ? P->DuplexInit + 2 * extension_cost : INF;
/**
*** update lin bx by linx liny matrix
**/
type2 = pair[S2[j + 1]][S1[i - 1]];
/**
*** start/extend interior loop
**/
in[i][j] =
MIN2(c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s,
in[i - 1][j] + iext_ass);
/**
*** start/extend nx1 target
*** use same type2 as for in
**/
inx[i][j] = MIN2(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s,
inx[i - 1][j] + iext_ass);
/**
*** start/extend 1xn target
*** use same type2 as for in
**/
iny[i][j] = MIN2(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s,
iny[i][j + 1] + iext_ass);
/**
*** extend interior loop
**/
in[i][j] = MIN2(in[i][j], in[i][j + 1] + iext_ass);
in[i][j] = MIN2(in[i][j], in[i - 1][j + 1] + iext_s);
/**
*** start/extend bulge target
**/
type2 = pair[S2[j]][S1[i - 1]];
bx[i][j] =
MIN2(bx[i - 1][j] + bext, c[i - 1][j] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0));
/**
*** start/extend bulge query
**/
type2 = pair[S2[j + 1]][S1[i]];
by[i][j] =
MIN2(by[i][j + 1] + bext, c[i][j + 1] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0));
/**
***end update recursion
***######################## Start stack extension##############################
**/
if (!type)
continue;
c[i][j] += E_ExtLoop(type, SS1[i - 1], SS2[j + 1], P) + 2 * extension_cost;
/**
*** stack extension
**/
if ((type2 = pair[S1[i - 1]][S2[j + 1]]))
c[i][j] =
MIN2(c[i - 1][j + 1] + P->stack[rtype[type]][type2] + 2 * extension_cost, c[i][j]);
/**
*** 1x0 / 0x1 stack extension
**/
type2 = pair[S1[i - 1]][S2[j + 2]];
c[i][j] = MIN2(
c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + 3 * extension_cost,
c[i][j]);
type2 = pair[S1[i - 2]][S2[j + 1]];
c[i][j] = MIN2(
c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + 3 * extension_cost,
c[i][j]);
/**
*** 1x1 / 2x2 stack extension
**/
type2 = pair[S1[i - 2]][S2[j + 2]];
c[i][j] = MIN2(
c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + 4 * extension_cost,
c[i][j]);
type2 = pair[S1[i - 3]][S2[j + 3]];
c[i][j] =
MIN2(c[i - 3][j + 3] +
P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 6 * extension_cost,
c[i][j]);
/**
*** 1x2 / 2x1 stack extension
*** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to
*** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]]
**/
type2 = pair[S1[i - 3]][S2[j + 2]];
c[i][j] =
MIN2(
c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + 5 * extension_cost,
c[i][j]);
type2 = pair[S1[i - 2]][S2[j + 3]];
c[i][j] =
MIN2(
c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 5 * extension_cost,
c[i][j]);
/**
*** 2x3 / 3x2 stack extension
**/
if ((type2 = pair[S1[i - 4]][S2[j + 3]])) {
c[i][j] = MIN2(c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost,
c[i][j]);
}
if ((type2 = pair[S1[i - 3]][S2[j + 4]])) {
c[i][j] = MIN2(c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost,
c[i][j]);
}
/**
*** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3
**/
/**
*** 3x3 or more
**/
c[i][j] = MIN2(
in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + 2 * extension_cost,
c[i][j]);
/**
*** 2xn or more
**/
c[i][j] = MIN2(
in[i - 4][j + 2] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost,
c[i][j]);
/**
*** nx2 or more
**/
c[i][j] = MIN2(
in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost,
c[i][j]);
/**
*** nx1 n>2
**/
c[i][j] = MIN2(
inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost,
c[i][j]);
/**
*** 1xn n>2
**/
c[i][j] = MIN2(
iny[i - 1][j + 3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost,
c[i][j]);
/**
*** nx0 n>1
**/
int bAU;
bAU = (type > 2 ? P->TerminalAU : 0);
c[i][j] = MIN2(bx[i - 2][j + 1] + 2 * extension_cost + bext + bAU, c[i][j]);
/**
*** 0xn n>1
**/
c[i][j] = MIN2(by[i - 1][j + 2] + 2 * extension_cost + bext + bAU, c[i][j]);
temp = min_colonne;
min_colonne = MIN2(c[i][j] + E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1],
P) + 2 * extension_cost,
min_colonne);
if (temp > min_colonne)
min_j_colonne = j;
/* ---------------------------------------------------------------------end update */
}
if (max >= min_colonne) {
max = min_colonne;
max_pos = i;
max_pos_j = min_j_colonne;
}
i++;
}
Emin = max;
i_min = max_pos;
j_min = max_pos_j;
int dGe;
dGe = 0;
struc = fbacktrack(i_min, j_min, extension_cost, il_a, il_b, b_a, b_b, &dGe);
if (i_min < n3 - 10)
i_min++;
if (j_min > 11)
j_min--;
l1 = strchr(struc, '&') - struc;
int size;
size = strlen(struc) - 1;
Emin -= size * (extension_cost);
mfe.i = i_min;
mfe.j = j_min;
mfe.energy = (double)Emin / 100.;
mfe.energy_backtrack = (double)dGe / 100.;
mfe.structure = struc;
free(S1);
free(S2);
free(SS1);
free(SS2);
for (i = 0; i <= n3; i++) {
free(c[i]);
free(in[i]);
free(bx[i]);
free(by[i]);
free(inx[i]);
free(iny[i]);
}
free(c);
free(in);
free(bx);
free(by);
free(inx);
free(iny);
return mfe;
}
PRIVATE char *
fbacktrack(int i,
int j,
const int extension_cost,
const int il_a,
const int il_b,
const int b_a,
const int b_b,
int *dG)
{
/* backtrack structure going backwards from i, and forwards from j
* return structure in bracket notation with & as separator */
int k, l, type, type2, E, traced, i0, j0;
char *st1, *st2, *struc;
int bopen = b_b;
int bext = b_a + extension_cost;
int iopen = il_b;
int iext_s = 2 * (il_a + extension_cost); /* iext_s: 2-nt extension of an interior loop, on both the i and the j side */
int iext_ass = 50 + il_a + extension_cost; /* iext_ass: asymmetric extension of an interior loop, on either the i or the j side */
st1 = (char *)vrna_alloc(sizeof(char) * (n3 + 1));
st2 = (char *)vrna_alloc(sizeof(char) * (n4 + 1));
i0 = MIN2(i + 1, n3 - 10);
j0 = MAX2(j - 1, 11);
int state;
state = 1; /* we start backtracking from a pair, i.e. the c matrix */
/* state 1 -> base pair, c
* state 2 -> interior loop, in
* state 3 -> bulge in target, bx
* state 4 -> bulge in query, by
* state 5 -> nx1 interior loop, inx
* state 6 -> 1xn interior loop, iny
*/
traced = 1;
k = i;
l = j;
type = pair[S1[i]][S2[j]];
*dG += E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1], P);
/* (type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]]; */
while (i > 10 && j <= n4 - 9 && traced) {
traced = 0;
switch (state) {
case 1:
type = pair[S1[i]][S2[j]];
int bAU;
bAU = (type > 2 ? P->TerminalAU : 0);
if (!type)
vrna_message_error("backtrack failed in fold duplex");
type2 = pair[S1[i - 1]][S2[j + 1]];
if (type2 &&
c[i][j] == (c[i - 1][j + 1] + P->stack[rtype[type]][type2] + 2 * extension_cost)) {
k = i - 1;
l = j + 1;
(*dG) += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 1]][S2[j + 2]];
if (type2 &&
c[i][j] ==
(c[i - 1][j + 2] + P->bulge[1] + P->stack[rtype[type]][type2] + 3 * extension_cost)) {
k = i - 1;
l = j + 2;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 2]][S2[j + 1]];
if (type2 &&
c[i][j] ==
(c[i - 2][j + 1] + P->bulge[1] + P->stack[type2][rtype[type]] + 3 * extension_cost)) {
k = i - 2;
l = j + 1;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 2]][S2[j + 2]];
if (type2 &&
c[i][j] ==
(c[i - 2][j + 2] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + 4 *
extension_cost)) {
k = i - 2;
l = j + 2;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 3]][S2[j + 3]];
if (type2 &&
c[i][j] ==
(c[i - 3][j + 3] +
P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 6 *
extension_cost)) {
k = i - 3;
l = j + 3;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 3]][S2[j + 2]];
if (type2 &&
c[i][j] ==
(c[i - 3][j + 2] + P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] +
5 *
extension_cost)) {
k = i - 3;
l = j + 2;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 2]][S2[j + 3]];
if (type2 &&
c[i][j] ==
(c[i - 2][j + 3] + P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] +
5 *
extension_cost)) {
k = i - 2;
l = j + 3;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 4]][S2[j + 3]];
if (type2 && c[i][j] == (c[i - 4][j + 3] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 *
extension_cost)) {
k = i - 4;
l = j + 3;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
type2 = pair[S1[i - 3]][S2[j + 4]];
if (type2 && c[i][j] == (c[i - 3][j + 4] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 *
extension_cost)) {
k = i - 3;
l = j + 4;
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
st1[i - 1] = '(';
st2[j - 1] = ')';
i = k;
j = l;
state = 1;
traced = 1;
break;
}
if (c[i][j] ==
(in[i - 3][j + 3] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 *
extension_cost +
2 * iext_s)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 3;
j = j + 3;
state = 2;
traced = 1;
break;
}
if (c[i][j] ==
(in[i - 4][j + 2] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 *
iext_ass + 2 * extension_cost)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 4;
j = j + 2;
state = 2;
traced = 1;
break;
}
if (c[i][j] ==
(in[i - 2][j + 4] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 *
iext_ass + 2 * extension_cost)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 2;
j = j + 4;
state = 2;
traced = 1;
break;
}
if (c[i][j] ==
(inx[i - 3][j + 1] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass +
iext_ass + 2 * extension_cost)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 3;
j = j + 1;
state = 5;
traced = 1;
break;
}
if (c[i][j] ==
(iny[i - 1][j + 3] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass +
iext_ass + 2 * extension_cost)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 1;
j = j + 3;
state = 6;
traced = 1;
break;
}
if (c[i][j] == (bx[i - 2][j + 1] + 2 * extension_cost + bext + bAU)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 2;
j = j + 1;
state = 3;
traced = 1;
break;
}
if (c[i][j] == (by[i - 1][j + 2] + 2 * extension_cost + bext + bAU)) {
k = i;
l = j;
st1[i - 1] = '(';
st2[j - 1] = ')';
i = i - 1;
j = j + 2;
state = 4;
traced = 1;
break;
}
break;
case 2:
if (in[i][j] == (in[i - 1][j + 1] + iext_s)) {
i--;
j++;
state = 2;
traced = 1;
break;
}
if (in[i][j] == (in[i - 1][j] + iext_ass)) {
i = i - 1;
state = 2;
traced = 1;
break;
}
if (in[i][j] == (in[i][j + 1] + iext_ass)) {
j++;
state = 2;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i - 1]];
if (type2 &&
in[i][j] == (c[i - 1][j + 1] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s)) {
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 3:
if (bx[i][j] == (bx[i - 1][j] + bext)) {
i--;
state = 3;
traced = 1;
break;
}
type2 = pair[S2[j]][S1[i - 1]];
if (type2 && bx[i][j] == (c[i - 1][j] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0))) {
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 4:
if (by[i][j] == (by[i][j + 1] + bext)) {
j++;
state = 4;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i]];
if (type2 && by[i][j] == (c[i][j + 1] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0))) {
int temp;
temp = k;
k = i;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 5:
if (inx[i][j] == (inx[i - 1][j] + iext_ass)) {
i--;
state = 5;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i - 1]];
if (type2 &&
inx[i][j] ==
(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s)) {
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
i = k;
j = l;
state = 1;
traced = 1;
break;
}
case 6:
if (iny[i][j] == (iny[i][j + 1] + iext_ass)) {
j++;
state = 6;
traced = 1;
break;
}
type2 = pair[S2[j + 1]][S1[i - 1]];
if (type2 &&
iny[i][j] ==
(c[i - 1][j + 1] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s)) {
int temp;
temp = k;
k = i - 1;
i = temp;
temp = l;
l = j + 1;
j = temp;
type = pair[S1[i]][S2[j]];
*dG += E_IntLoop(i - k - 1,
l - j - 1,
type2,
rtype[type],
SS1[k + 1],
SS2[l - 1],
SS1[i - 1],
SS2[j + 1],
P);
i = k;
j = l;
state = 1;
traced = 1;
break;
}
}
}
if (!traced) {
E = c[i][j];
/**
*** if (i>1) {E -= P->dangle5[type][SS1[i-1]]+extension_cost; *dG+=P->dangle5[type][SS1[i-1]];}
*** if (j<n4){E -= P->dangle3[type][SS2[j+1]]+extension_cost; *dG+=P->dangle3[type][SS2[j+1]];}
*** if (type>2) {E -= P->TerminalAU; *dG+=P->TerminalAU;}
**/
int correction;
correction = E_ExtLoop(type, (i > 1) ? SS1[i - 1] : -1, (j < n4) ? SS2[j + 1] : -1, P);
*dG += correction;
E -= correction + 2 * extension_cost;
if (E != P->DuplexInit + 2 * extension_cost) {
vrna_message_error("backtrack failed in second fold duplex");
} else {
*dG += P->DuplexInit;
st1[i - 1] = '(';
st2[j - 1] = ')';
}
}
if (i > 11)
i--;
if (j < n4 - 10)
j++;
struc = (char *)vrna_alloc(i0 - i + 1 + j - j0 + 1 + 2);
for (k = MAX2(i, 1); k <= i0; k++)
if (!st1[k - 1])
st1[k - 1] = '.';
for (k = j0; k <= j; k++)
if (!st2[k - 1])
st2[k - 1] = '.';
strcpy(struc, st1 + MAX2(i - 1, 0));
strcat(struc, "&");
strcat(struc, st2 + j0 - 1);
/* printf("%s %3d,%-3d : %3d,%-3d\n", struc, i,i0,j0,j); */
free(st1);
free(st2);
return struc;
}
duplexT **
Lduplexfold(const char *s1,
const char *s2,
const int threshold,
const int extension_cost,
const int alignment_length,
const int delta,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
/**
*** See variable definition in fduplexfold_XS
**/
int i, j;
int bopen = b_b;
int bext = b_a + extension_cost;
int iopen = il_b;
int iext_s = 2 * (il_a + extension_cost); /* iext_s: 2-nt extension of an interior loop, on both the i and the j side */
int iext_ass = 50 + il_a + extension_cost; /* iext_ass: asymmetric extension of an interior loop, on either the i or the j side */
int min_colonne = INF; /* holds the best (minimal) energy of a column */
int i_length;
int max_pos; /* get position of the best hit */
int max_pos_j;
int temp = INF;
int min_j_colonne;
int max = INF;
int *position; /* contains the position of the hits with energy > E */
int *position_j;
/**
*** 1D array corresponding to the standard 2d recursion matrix
*** Makes the computation 20% faster
**/
int *SA;
vrna_md_t md;
/**
*** variable initialization
**/
n1 = (int)strlen(s1);
n2 = (int)strlen(s2);
/**
*** Sequence encoding
**/
set_model_details(&md);
if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
update_fold_params();
if (P)
free(P);
P = vrna_params(&md);
make_pair_matrix();
}
encode_seqs(s1, s2);
/**
*** Position of the high score on the target and query sequence
**/
position = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int));
position_j = (int *)vrna_alloc((delta + n1 + 3 + delta) * sizeof(int));
/**
*** instead of having 4 2-dim arrays we use a unique 1-dim array
*** The 2D -> 1D mapping is done via the macros
*** LCI(i,j,l) ((i )*l + j)
*** LINI(i,j,l) ((i + 5)*l + j)
*** LBXI(i,j,l) ((i + 10)*l + j)
*** LBYI(i,j,l) ((i + 15)*l + j)
*** LINIX(i,j,l) ((i + 20)*l + j)
*** LINIY(i,j,l) ((i + 25)*l + j)
***
*** SA has a length of 5 (number of columns we look back) *
*** * 6 (number of structures we look at) *
*** * length of the sequence
**/
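/**
*** Layout sketch (derived from the macros above, which are assumed to be
*** defined elsewhere in this file): the row index is reduced modulo 5 on
*** every access, e.g. SA[LCI(i % 5, j, n2)] plays the role of c[i][j], so
*** SA acts as a ring buffer holding only the last five rows of each of the
*** six matrices -- exactly the look-back window (i-1 ... i-4) the recursion
*** below ever needs.
**/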
SA = (int *)vrna_alloc(sizeof(int) * 5 * 6 * (n2 + 5));
for (j = n2 + 4; j >= 0; j--) {
/* set all 30 slots of column j (5 rows x 6 matrices) to INF */
for (int k = 0; k < 30; k++)
SA[(j * 30) + k] = INF;
}
i = 10;
i_length = n1 - 9;
while (i < i_length) {
int idx = i % 5;
int idx_1 = (i - 1) % 5;
int idx_2 = (i - 2) % 5;
int idx_3 = (i - 3) % 5;
int idx_4 = (i - 4) % 5;
j = n2 - 9;
while (9 < --j) {
int type, type2;
type = pair[S1[i]][S2[j]];
/**
*** Start duplex
**/
SA[LCI(idx, j, n2)] = type ? P->DuplexInit + 2 * extension_cost : INF;
/**
*** update lin bx by linx liny matrix
**/
type2 = pair[S2[j + 1]][S1[i - 1]];
/**
*** start/extend interior loop
**/
SA[LINI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->mismatchI[type2][SS2[j]][SS1[i]] + iopen + iext_s,
SA[LINI(idx_1, j, n2)] + iext_ass);
/**
*** start/extend nx1 target
*** use same type2 as for in
**/
SA[LINIX(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s,
SA[LINIX(idx_1, j, n2)] + iext_ass);
/**
*** start/extend 1xn target
*** use same type2 as for in
**/
SA[LINIY(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->mismatch1nI[type2][SS2[j]][SS1[i]] + iopen + iext_s,
SA[LINIY(idx, j + 1, n2)] + iext_ass);
/**
*** extend interior loop
**/
SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)], SA[LINI(idx, j + 1, n2)] + iext_ass);
SA[LINI(idx, j, n2)] = MIN2(SA[LINI(idx, j, n2)], SA[LINI(idx_1, j + 1, n2)] + iext_s);
/**
*** start/extend bulge target
**/
type2 = pair[S2[j]][S1[i - 1]];
SA[LBXI(idx, j, n2)] = MIN2(SA[LBXI(idx_1, j, n2)] + bext,
SA[LCI(idx_1, j,
n2)] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0));
/**
*** start/extend bulge query
**/
type2 = pair[S2[j + 1]][S1[i]];
SA[LBYI(idx, j, n2)] = MIN2(SA[LBYI(idx, j + 1, n2)] + bext,
SA[LCI(idx, j + 1,
n2)] + bopen + bext + (type2 > 2 ? P->TerminalAU : 0));
/**
***end update recursion
***##################### Start stack extension ######################
**/
if (!type)
continue;
SA[LCI(idx, j, n2)] += E_ExtLoop(type, SS1[i - 1], SS2[j + 1], P) + 2 * extension_cost;
/**
*** stack extension
**/
if ((type2 = pair[S1[i - 1]][S2[j + 1]]))
SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_1, j + 1,
n2)] + P->stack[rtype[type]][type2] + 2 * extension_cost,
SA[LCI(idx, j, n2)]);
/**
*** 1x0 / 0x1 stack extension
**/
if ((type2 = pair[S1[i - 1]][S2[j + 2]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_1, j + 2,
n2)] + P->bulge[1] + P->stack[rtype[type]][type2] + 3 * extension_cost,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 2]][S2[j + 1]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_2, j + 1,
n2)] + P->bulge[1] + P->stack[type2][rtype[type]] + 3 * extension_cost,
SA[LCI(idx, j, n2)]);
}
/**
*** 1x1 / 2x2 stack extension
**/
if ((type2 = pair[S1[i - 2]][S2[j + 2]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_2, j + 2,
n2)] + P->int11[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]] + 4 * extension_cost,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 3]][S2[j + 3]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_3, j + 3,
n2)] +
P->int22[type2][rtype[type]][SS1[i - 2]][SS1[i - 1]][SS2[j + 1]][SS2[j +
2]] + 6 * extension_cost,
SA[LCI(idx, j, n2)]);
}
/**
*** 1x2 / 2x1 stack extension
*** E_IntLoop(1,2,type2, rtype[type],SS1[i-1], SS2[j+2], SS1[i-1], SS2[j+1], P) corresponds to
*** P->int21[rtype[type]][type2][SS2[j+2]][SS1[i-1]][SS1[i-1]]
**/
if ((type2 = pair[S1[i - 3]][S2[j + 2]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_3, j + 2,
n2)] +
P->int21[rtype[type]][type2][SS2[j + 1]][SS1[i - 2]][SS1[i - 1]] + 5 * extension_cost,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 2]][S2[j + 3]])) {
SA[LCI(idx, j,
n2)] = MIN2(SA[LCI(idx_2, j + 3,
n2)] +
P->int21[type2][rtype[type]][SS1[i - 1]][SS2[j + 1]][SS2[j + 2]] + 5 * extension_cost,
SA[LCI(idx, j, n2)]);
}
/**
*** 2x3 / 3x2 stack extension
**/
if ((type2 = pair[S1[i - 4]][S2[j + 3]])) {
SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_4, j + 3,
n2)] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 3]][SS2[j + 2]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost,
SA[LCI(idx, j, n2)]);
}
if ((type2 = pair[S1[i - 3]][S2[j + 4]])) {
SA[LCI(idx, j, n2)] = MIN2(SA[LCI(idx_3, j + 4,
n2)] + P->internal_loop[5] + P->ninio[2] +
P->mismatch23I[type2][SS1[i - 2]][SS2[j + 3]] +
P->mismatch23I[rtype[type]][SS2[j + 1]][SS1[i - 1]] + 7 * extension_cost,
SA[LCI(idx, j, n2)]);
}
/**
*** So now we have to handle 1x3, 3x1, 3x3, and mxn m,n > 3
**/
/**
*** 3x3 or more
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINI(idx_3, j + 3,
n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + 2 * iext_s + 2 * extension_cost,
SA[LCI(idx, j, n2)]);
/**
*** 2xn or more
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINI(idx_4, j + 2,
n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost,
SA[LCI(idx, j, n2)]);
/**
*** nx2 or more
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINI(idx_2, j + 4,
n2)] + P->mismatchI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_s + 2 * iext_ass + 2 * extension_cost,
SA[LCI(idx, j, n2)]);
/**
*** nx1 n>2
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINIX(idx_3, j + 1,
n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost,
SA[LCI(idx, j, n2)]);
/**
*** 1xn n>2
**/
SA[LCI(idx, j,
n2)] = MIN2(SA[LINIY(idx_1, j + 3,
n2)] + P->mismatch1nI[rtype[type]][SS1[i - 1]][SS2[j + 1]] + iext_ass + iext_ass + 2 * extension_cost,
SA[LCI(idx, j, n2)]);
/**
*** nx0 n>1
**/
int bAU;
bAU = (type > 2 ? P->TerminalAU : 0);
SA[LCI(idx, j,
n2)] =
MIN2(SA[LBXI(idx_2, j + 1, n2)] + 2 * extension_cost + bext + bAU, SA[LCI(idx, j, n2)]);
/**
*** 0xn n>1
**/
SA[LCI(idx, j,
n2)] =
MIN2(SA[LBYI(idx_1, j + 2, n2)] + 2 * extension_cost + bext + bAU, SA[LCI(idx, j, n2)]);
temp = min_colonne;
min_colonne = MIN2(SA[LCI(idx, j, n2)] + E_ExtLoop(rtype[type], SS2[j - 1], SS1[i + 1],
P) + 2 * extension_cost,
min_colonne);
if (temp > min_colonne)
min_j_colonne = j;
}
if (max >= min_colonne) {
max = min_colonne;
max_pos = i;
max_pos_j = min_j_colonne;
}
position[i + delta] = min_colonne;
min_colonne = INF;
position_j[i + delta] = min_j_colonne;
i++;
}
/* printf("MAX: %d",max); */
free(S1);
free(S2);
free(SS1);
free(SS2);
if (max < threshold) {
find_max(position,
position_j,
delta,
threshold,
alignment_length,
s1,
s2,
extension_cost,
fast,
il_a,
il_b,
b_a,
b_b);
}
if (max < INF) {
plot_max(max,
max_pos,
max_pos_j,
alignment_length,
s1,
s2,
extension_cost,
fast,
il_a,
il_b,
b_a,
b_b);
}
free(SA);
free(position);
free(position_j);
return NULL;
}
PRIVATE void
find_max(const int *position,
const int *position_j,
const int delta,
const int threshold,
const int alignment_length,
const char *s1,
const char *s2,
const int extension_cost,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
int pos = n1 - 9;
if (fast == 1) {
while (10 < pos--) {
int temp_min = 0;
if (position[pos + delta] < (threshold)) {
int search_range;
search_range = delta + 1;
while (--search_range)
if (position[pos + delta - search_range] <= position[pos + delta - temp_min])
temp_min = search_range;
pos -= temp_min;
int max_pos_j;
max_pos_j = position_j[pos + delta];
int max;
max = position[pos + delta];
printf("target upper bound %d: query lower bound %d (%5.2f) \n",
pos - 10,
max_pos_j - 10,
((double)max) / 100);
pos = MAX2(10, pos + temp_min - delta);
}
}
} else if (fast == 2) {
pos = n1 - 9;
while (10 < pos--) {
int temp_min = 0;
if (position[pos + delta] < (threshold)) {
int search_range;
search_range = delta + 1;
while (--search_range)
if (position[pos + delta - search_range] <= position[pos + delta - temp_min])
temp_min = search_range;
pos -= temp_min;
int max_pos_j;
max_pos_j = position_j[pos + delta];
/* max_pos_j and pos correspond to the real positions
* in the extended sequence.
* pos=1 -> position 1 in the sequence (and not 0 as in C)
* max_pos_j -> position 1 in the sequence (not 0 as in C)
*/
int alignment_length2;
alignment_length2 = MIN2(n1, n2);
int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */
int end_t = MIN2(n1 - 10, pos + 1);
int begin_q = MAX2(11, max_pos_j - 1); /* 10 */
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20));
strcpy(s3, "NNNNNNNNNN");
strcpy(s4, "NNNNNNNNNN");
strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1);
strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1);
strcat(s3, "NNNNNNNNNN");
strcat(s4, "NNNNNNNNNN");
s3[end_t - begin_t + 1 + 20] = '\0';
s4[end_q - begin_q + 1 + 20] = '\0';
duplexT test;
test = fduplexfold(s3, s4, extension_cost, il_a, il_b, b_a, b_b);
if (test.energy * 100 < threshold) {
int l1 = strchr(test.structure, '&') - test.structure;
printf("%s %3d,%-3d : %3d,%-3d (%5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n", test.structure,
begin_t - 10 + test.i - l1 - 10,
begin_t - 10 + test.i - 1 - 10,
begin_q - 10 + test.j - 1 - 10,
(begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10,
test.energy, test.energy_backtrack, pos - 10, max_pos_j - 10,
((double)position[pos + delta]) / 100);
pos = MAX2(10, pos + temp_min - delta);
}
free(s3);
free(s4);
free(test.structure);
}
}
}
#if 0
else if (fast == 3) {
pos = n1 - 9;
while (10 < pos--) {
int temp_min = 0;
if (position[pos + delta] < (threshold)) {
int search_range;
search_range = delta + 1;
while (--search_range)
if (position[pos + delta - search_range] <= position[pos + delta - temp_min])
temp_min = search_range;
pos -= temp_min;
int max_pos_j;
max_pos_j = position_j[pos + delta];
/* max_pos_j and pos correspond to the real positions
* in the extended sequence.
* pos=1 -> position 1 in the sequence (and not 0 as in C)
* max_pos_j -> position 1 in the sequence (not 0 as in C)
*/
//Here we can start the reverse recursion:
//starting from the reported pos / max_pos_j we restart the recursion,
//being careful with the fact that all energies are inverted.
int alignment_length2;
//Select the smallest interaction length in order to define the new interaction length
alignment_length2 = MIN2(n1 - pos + 1, max_pos_j - 1 + 1);
//
int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */
int end_t = MIN2(n1 - 10, pos + 1);
int begin_q = MAX2(11, max_pos_j - 1); /* 10 */
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20));
strcpy(s3, "NNNNNNNNNN");
strcpy(s4, "NNNNNNNNNN");
strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1);
strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1);
strcat(s3, "NNNNNNNNNN");
strcat(s4, "NNNNNNNNNN");
s3[end_t - begin_t + 1 + 20] = '\0';
s4[end_q - begin_q + 1 + 20] = '\0';
duplexT test;
test = fduplexfold(s4, s3, extension_cost, il_a, il_b, b_a, b_b);
if (test.energy * 100 < threshold) {
int structureLength = strlen(test.structure);
int l1 = strchr(test.structure, '&') - test.structure;
int start_t, end_t, start_q, end_q;
/*reverse structure string*/
char *reverseStructure = (char *)vrna_alloc(sizeof(char) * (structureLength + 1));
int posStructure;
for (posStructure = l1 + 1; posStructure < structureLength; posStructure++) {
if (test.structure[posStructure] == ')')
reverseStructure[posStructure - l1 - 1] = '(';
else
reverseStructure[posStructure - l1 - 1] = test.structure[posStructure];
}
reverseStructure[structureLength - 1 - l1] = '&';
for (posStructure = 0; posStructure < l1; posStructure++) {
if (test.structure[posStructure] == '(')
reverseStructure[structureLength + posStructure - l1] = ')';
else
reverseStructure[structureLength + posStructure - l1] = test.structure[posStructure];
}
reverseStructure[structureLength] = '\0';
// l1=strchr(reverse.structure, '&')-test.structure;
printf("%s %3d,%-3d : %3d,%-3d (%5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n",
reverseStructure,
begin_t - 10 + test.j - 1 - 10,
(begin_t - 11) + test.j + strlen(test.structure) - l1 - 2 - 10,
begin_q - 10 + test.i - l1 - 10,
begin_q - 10 + test.i - 1 - 10,
test.energy,
test.energy_backtrack,
pos,
max_pos_j,
((double)position[pos + delta]) / 100);
pos = MAX2(10, pos + temp_min - delta);
}
free(s3);
free(s4);
free(test.structure);
}
}
}
#endif
else {
pos = n1 - 9;
while (10 < pos--) {
int temp_min = 0;
if (position[pos + delta] < (threshold)) {
int search_range;
search_range = delta + 1;
while (--search_range)
if (position[pos + delta - search_range] <= position[pos + delta - temp_min])
temp_min = search_range;
pos -= temp_min;
int max_pos_j;
max_pos_j = position_j[pos + delta];
/* max_pos_j and pos correspond to the real positions
* in the extended sequence.
* pos=1 -> position 1 in the sequence (and not 0 as in C)
* max_pos_j -> position 1 in the sequence (not 0 as in C)
*/
int alignment_length2;
alignment_length2 = MIN2(n1, n2);
int begin_t = MAX2(11, pos - alignment_length2 + 1); /* 10 */
int end_t = MIN2(n1 - 10, pos + 1);
int begin_q = MAX2(11, max_pos_j - 1); /* 10 */
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2));
strncpy(s3, (s1 + begin_t - 1), end_t - begin_t + 1);
strncpy(s4, (s2 + begin_q - 1), end_q - begin_q + 1);
s3[end_t - begin_t + 1] = '\0';
s4[end_q - begin_q + 1] = '\0';
duplexT test;
test = duplexfold(s3, s4, extension_cost);
if (test.energy * 100 < threshold) {
int l1 = strchr(test.structure, '&') - test.structure;
printf("%s %3d,%-3d : %3d,%-3d (%5.2f) i:%d,j:%d <%5.2f>\n", test.structure,
begin_t - 10 + test.i - l1,
begin_t - 10 + test.i - 1,
begin_q - 10 + test.j - 1,
(begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2,
test.energy, pos - 10, max_pos_j - 10, ((double)position[pos + delta]) / 100);
pos = MAX2(10, pos + temp_min - delta);
}
free(s3);
free(s4);
free(test.structure);
}
}
}
}
PRIVATE void
plot_max(const int max,
const int max_pos,
const int max_pos_j,
const int alignment_length,
const char *s1,
const char *s2,
const int extension_cost,
const int fast,
const int il_a,
const int il_b,
const int b_a,
const int b_b)
{
if (fast == 1) {
printf("target upper bound %d: query lower bound %d (%5.2f)\n", max_pos - 10, max_pos_j - 10,
((double)max) / 100);
} else if (fast == 2) {
int alignment_length2;
alignment_length2 = MIN2(n1, n2);
int begin_t = MAX2(11, max_pos - alignment_length2 + 1); /* 10 */
int end_t = MIN2(n1 - 10, max_pos + 1);
int begin_q = MAX2(11, max_pos_j - 1); /* 10 */
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2 + 20));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2 + 20));
strcpy(s3, "NNNNNNNNNN");
strcpy(s4, "NNNNNNNNNN");
strncat(s3, (s1 + begin_t - 1), end_t - begin_t + 1);
strncat(s4, (s2 + begin_q - 1), end_q - begin_q + 1);
strcat(s3, "NNNNNNNNNN");
strcat(s4, "NNNNNNNNNN");
s3[end_t - begin_t + 1 + 20] = '\0';
s4[end_q - begin_q + 1 + 20] = '\0';
duplexT test;
test = fduplexfold(s3, s4, extension_cost, il_a, il_b, b_a, b_b);
int l1 = strchr(test.structure, '&') - test.structure;
printf("%s %3d,%-3d : %3d,%-3d (%5.2f) [%5.2f] i:%d,j:%d <%5.2f>\n", test.structure,
begin_t - 10 + test.i - l1 - 10,
begin_t - 10 + test.i - 1 - 10,
begin_q - 10 + test.j - 1 - 10,
(begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2 - 10,
test.energy, test.energy_backtrack, max_pos - 10, max_pos_j - 10, ((double)max) / 100);
free(s3);
free(s4);
free(test.structure);
} else {
duplexT test;
int alignment_length2;
alignment_length2 = MIN2(n1, n2);
int begin_t = MAX2(11, max_pos - alignment_length2 + 1);
int end_t = MIN2(n1 - 10, max_pos + 1);
int begin_q = MAX2(11, max_pos_j - 1);
int end_q = MIN2(n2 - 10, max_pos_j + alignment_length2 - 1);
char *s3 = (char *)vrna_alloc(sizeof(char) * (end_t - begin_t + 2));
char *s4 = (char *)vrna_alloc(sizeof(char) * (end_q - begin_q + 2));
strncpy(s3, (s1 + begin_t - 1), end_t - begin_t + 1);
strncpy(s4, (s2 + begin_q - 1), end_q - begin_q + 1);
s3[end_t - begin_t + 1] = '\0';
s4[end_q - begin_q + 1] = '\0';
test = duplexfold(s3, s4, extension_cost);
int l1 = strchr(test.structure, '&') - test.structure;
printf("%s %3d,%-3d : %3d,%-3d (%5.2f) i:%d,j:%d <%5.2f>\n", test.structure,
begin_t - 10 + test.i - l1,
begin_t - 10 + test.i - 1,
begin_q - 10 + test.j - 1,
(begin_q - 11) + test.j + (int)strlen(test.structure) - l1 - 2,
test.energy, max_pos - 10, max_pos_j - 10, ((double)max) / 100);
free(s3);
free(s4);
free(test.structure);
}
}
PRIVATE void
update_dfold_params(void)
{
vrna_md_t md;
if (P)
free(P);
set_model_details(&md);
P = vrna_params(&md);
make_pair_matrix();
}
PRIVATE void
encode_seqs(const char *s1,
const char *s2)
{
unsigned int i, l;
l = strlen(s1);
S1 = encode_seq(s1);
SS1 = (short *)vrna_alloc(sizeof(short) * (l + 1));
/* SS1 is needed only for the special X, K and I bases and for energy_set != 0 */
for (i = 1; i <= l; i++) /* make numerical encoding of sequence */
SS1[i] = alias[S1[i]]; /* for mismatches of nonstandard bases */
l = strlen(s2);
S2 = encode_seq(s2);
SS2 = (short *)vrna_alloc(sizeof(short) * (l + 1));
/* SS2 is needed only for the special X, K and I bases and for energy_set != 0 */
for (i = 1; i <= l; i++) /* make numerical encoding of sequence */
SS2[i] = alias[S2[i]]; /* for mismatches of nonstandard bases */
}
PRIVATE short *
encode_seq(const char *sequence)
{
unsigned int i, l;
short *S;
l = strlen(sequence);
S = (short *)vrna_alloc(sizeof(short) * (l + 2));
S[0] = (short)l;
/* make numerical encoding of sequence */
for (i = 1; i <= l; i++)
S[i] = (short)encode_char(toupper(sequence[i - 1]));
/* for circular folding add first base at position n+1 */
S[l + 1] = S[1];
return S;
}
int
arraySize(duplexT **array)
{
int site_count = 0;
while (array[site_count] != NULL)
site_count++;
return site_count;
}
void
freeDuplexT(duplexT **array)
{
int size = arraySize(array);
/* free every element, including index 0, then the array itself;
* also behaves correctly for an empty array */
while (size-- > 0) {
free(array[size]->structure);
free(array[size]);
}
free(array);
}
|
AI_model1.c | #include "AI.h"
#include <omp.h>
#define MAXSTEP 3
//#define CHECK_SCORE
//This is for simple spawn
//the simulation function for the branches of the search tree
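//note on the sign convention used below: scores are always measured from the
//fixed `player`'s perspective; multiplying by playerTurn (assumed to be +1/-1)
//before maximizing, and again on return, turns this into a negamax search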
int ai_model1_simulate(GameState *gameState, Player *player, int depth)
{
if(depth<=0)return ai_sum_scores(gameState,player);
int MaxScore=-60000;
int playerTurn=gameState->playerTurn;
int total_num_moves=0;
vector MovesStart,MovesEnd;
vector_init(&MovesStart);
vector_init(&MovesEnd);
int cnt=0;
for(int i=0;i<64;i++)
{
vector CurLegalMoves=env_get_legal_moves(gameState,player,i);
cnt=CurLegalMoves.count;
if(cnt>0){
vector_cat(&MovesEnd,&CurLegalMoves);
for(int j=0;j<cnt;j++) vector_add(&MovesStart,i);
}
vector_free(&CurLegalMoves);
total_num_moves+=cnt;
}
assert(MovesStart.count==MovesEnd.count);
int *Scores=malloc(sizeof(int)*total_num_moves);
omp_set_num_threads(2);
#pragma omp parallel for shared(total_num_moves,gameState,player,MovesStart,MovesEnd,depth,Scores,playerTurn)
for(int i=0;i<total_num_moves;i++)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,vector_get(&MovesStart,i),vector_get(&MovesEnd,i));
int score=playerTurn*ai_model1_simulate(&simulation,player,depth-1);
Scores[i]=score;
env_free_state(&simulation);
}
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
vector_free(&MovesStart);
vector_free(&MovesEnd);
free(Scores);
return MaxScore*playerTurn;
}
//the play function for the root of the search tree; returns the quit status from env_check_end
int ai_model1_play(GameState *gameState, Player *player, int maxStep)
{
int check_end=env_check_end(gameState,player);
if(check_end!=0)
{
env_free_container(gameState);
return check_end;
}
int MaxScore=-60000;
// vector MovesStart,MovesEnd,Scores;
// vector_init(&BestMovesID);
// vector_init(&MovesStart);
// vector_init(&MovesEnd);
// vector_init(&Scores);
int container_size=gameState->moves_vector_cnt;
int total_num_moves=0;
int *accu_container_size_arr=malloc(sizeof(int)*gameState->moves_vector_cnt);
for(int i=0;i<container_size;i++){
total_num_moves+=gameState->container[i].legal_moves.count;
if(i==0)accu_container_size_arr[0]=0;
else accu_container_size_arr[i]=accu_container_size_arr[i-1]+gameState->container[i-1].legal_moves.count;
}
int *MovesStart=malloc(sizeof(int)*total_num_moves);
int *MovesEnd=malloc(sizeof(int)*total_num_moves);
int *Scores=malloc(sizeof(int)*total_num_moves);
omp_set_num_threads(16);
omp_set_nested(1);
#pragma omp parallel for shared(MovesStart,MovesEnd)
for(int i=0;i<container_size;i++)
{
vector CurLegalMoves=gameState->container[i].legal_moves;
int cnt=CurLegalMoves.count;
int pos=gameState->container[i].pos;
for(int j=0;j<cnt;j++){
MovesStart[accu_container_size_arr[i]+j]=pos;
MovesEnd[accu_container_size_arr[i]+j]=vector_get(&CurLegalMoves,j);
}
}
// assert(MovesStart.count==MovesEnd.count);
int playerTurn=gameState->playerTurn;
#pragma omp parallel for shared(Scores)
for(int i=0;i<total_num_moves;i++)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,MovesStart[i],MovesEnd[i]);
int score=playerTurn*ai_model1_simulate(&simulation,player,maxStep); //per-iteration: a shared score would be a data race
Scores[i]=score;
env_free_state(&simulation);
}
int BestMovesCnt=0;
vector BestMovesID;
vector_init(&BestMovesID);
if(stack_check_repeated_move(gameState->moves_stack)){
int MaxScoresArr[6];
for(int i=0;i<6;i++)MaxScoresArr[i]=-60000;
int MinScoreID,MinScoreArrValue;
for(int i=0;i<total_num_moves;i++){
MinScoreArrValue=MaxScoresArr[0];
MinScoreID=0;
for(int j=1;j<6;j++){
if(MaxScoresArr[j]<MinScoreArrValue){
MinScoreArrValue=MaxScoresArr[j];
MinScoreID=j;
}
}
MaxScoresArr[MinScoreID]=MAX(MaxScoresArr[MinScoreID],Scores[i]);
}
for(int i=0;i<total_num_moves;i++){
for(int j=0;j<6;j++){
if(Scores[i]==MaxScoresArr[j]){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
}
else{
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
for(int i=0;i<total_num_moves;i++){
if(Scores[i]==MaxScore){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
int id=vector_get(&BestMovesID,rand()%BestMovesCnt);
#ifdef CHECK_SCORE
printf("It is %d playing\n",gameState->playerTurn);
ai_print_board(gameState);
printf("Current Score is %d\n",ai_sum_scores(gameState,player));
#endif
env_play(gameState,player,MovesStart[id],MovesEnd[id]);
#ifdef CHECK_SCORE
printf("The player has decided to move from %d to %d\n",vector_get(&MovesStart,id),vector_get(&MovesEnd,id));
ai_print_board(gameState);
printf("After making the move, the score is %d\n",ai_sum_scores(gameState,player));
#endif
vector_free(&BestMovesID);
free(MovesStart);
free(MovesEnd);
free(Scores);
free(accu_container_size_arr);
env_free_container(gameState);
return 0;
}
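//hypothetical driver sketch (the state/player initializers are assumed to
//come from AI.h; they are not part of this file):
//
// GameState gameState; Player player;
// env_init_state(&gameState); /* assumed initializer */
// int quit = 0;
// while (!quit)
//     quit = ai_model1_play(&gameState, &player, MAXSTEP);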
|
bml_adjungate_triangle_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_introspection.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_adjungate_triangle_ellpack.h"
#include "bml_types_ellpack.h"
#include <stdio.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Adjungates a triangle of a matrix in place.
*
* \ingroup adjungate_triangle_group
*
* \param A[in,out] The matrix for which the triangle should be adjungated
* \param triangle[in] Which triangle to adjungate ('u': upper, 'l': lower)
*
* WARNING: Please verify race conditions and parallel performance.
*/
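/* Hypothetical usage sketch (the typed name follows the usual TYPED_FUNC
* suffix convention and is assumed, not spelled out in this file):
*
*   bml_matrix_ellpack_t *A = ...;  // only the upper triangle stored
*   bml_adjungate_triangle_ellpack_double_real(A, "u");
*   // A now also carries the conjugated entries in its lower triangle
*/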
void TYPED_FUNC(
bml_adjungate_triangle_ellpack) (
bml_matrix_ellpack_t * A,
char *triangle)
{
int A_N = A->N;
int A_M = A->M;
int l;
int ll;
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
#ifdef _OPENMP
omp_lock_t lock[A_M];
#endif
switch (*triangle)
{
case 'u':
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_init_lock(&(lock[i]));
#endif
#pragma omp parallel for \
shared(A_N, A_M, A_index, A_nnz, A_value, lock) \
private(l, ll)
// WARNING: Please, check for race conditions ...
for (int i = 0; i < A_N; i++) // For every row
{
l = A_nnz[i];
for (int j = 0; j < l; j++) // We search for indices gt 0.
{
ll = A_index[ROWMAJOR(i, j, A_N, A_M)];
if (ll > 0)
{
if (ll > i)
{
#ifdef _OPENMP
omp_set_lock(&(lock[ll]));
#endif
A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i;
A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] =
conj(A_value[ROWMAJOR(i, j, A_N, A_M)]);
A_nnz[ll]++;
#ifdef _OPENMP
omp_unset_lock(&(lock[ll]));
#endif
}
}
}
}
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_destroy_lock(&(lock[i]));
#endif
break;
case 'l':
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_init_lock(&(lock[i]));
#endif
#pragma omp parallel for \
shared(lock, A_N, A_M, A_index, A_nnz, A_value) \
private(l, ll)
// WARNING: Please, check for race conditions and parallel performances ...
for (int i = 0; i < A_N; i++)
{
l = A_nnz[i];
for (int j = 0; j < l; j++)
{
ll = A_index[ROWMAJOR(i, j, A_N, A_M)];
if (ll >= 0)
{
if (ll < i)
{
#ifdef _OPENMP
omp_set_lock(&(lock[ll]));
#endif
A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i;
A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] =
conj(A_value[ROWMAJOR(i, j, A_N, A_M)]);
A_nnz[ll]++;
#ifdef _OPENMP
omp_unset_lock(&(lock[ll]));
#endif
}
}
}
}
#ifdef _OPENMP
for (int i = 0; i < A_M; i++)
omp_destroy_lock(&(lock[i]));
#endif
break;
default:
LOG_ERROR("unknown triangle %c\n", triangle);
break;
}
}
|
GB_binop__times_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_01__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_03__times_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc32)
// A*D function (colscale): GB (_AxD__times_fc32)
// D*A function (rowscale): GB (_DxB__times_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc32)
// C=scalar+B GB (_bind1st__times_fc32)
// C=scalar+B' GB (_bind1st_tran__times_fc32)
// C=A+scalar GB (_bind2nd__times_fc32)
// C=A'+scalar GB (_bind2nd_tran__times_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_mul (aij, bij)
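// (GB_FC32_mul is assumed to compute the single-precision complex product,
// z = x * y over GxB_FC32_t, i.e. float complex)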
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC32_mul (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FC32 || GxB_NO_TIMES_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__times_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = Bx [p] ;
Cx [p] = GB_FC32_mul (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = Ax [p] ;
Cx [p] = GB_FC32_mul (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_mul (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__times_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
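// note added for clarity: the redefinition below appears to restore the
// default GB_ATYPE mapping after the transpose template include; it
// re-states the same type used above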
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_mul (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__times_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP clause.
class OMPClause {
/// \brief Starting location of the clause (the clause keyword).
SourceLocation StartLoc;
/// \brief Ending location of the clause.
SourceLocation EndLoc;
/// \brief Kind of the clause.
OpenMPClauseKind Kind;
protected:
OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
: StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}
public:
/// \brief Returns the starting location of the clause.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns the ending location of the clause.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Sets the starting location of the clause.
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Sets the ending location of the clause.
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
OpenMPClauseKind getClauseKind() const { return Kind; }
bool isImplicit() const { return StartLoc.isInvalid(); }
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<OMPClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *) { return true; }
};
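// Illustrative sketch (not part of the original header): the classof() hooks
// on each clause class below are what make LLVM-style RTTI work, so client
// code typically dispatches on a generic 'const OMPClause *C' like this
// (handleCondition is a hypothetical helper, not a Clang API):
//
//   if (const auto *IC = llvm::dyn_cast<OMPIfClause>(C))
//     handleCondition(IC->getCondition()); // Expr*, may be null if empty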
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Pre-initialization statement for the clause.
Stmt *PreInit = nullptr;
/// Region that captures the associated stmt.
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
protected:
OMPClauseWithPreInit(const OMPClause *This) {
assert(get(This) && "get is not tuned for pre-init.");
}
/// Set pre-initialization statement for the clause.
void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) {
PreInit = S;
CaptureRegion = ThisRegion;
}
public:
/// Get pre-initialization statement for the clause.
const Stmt *getPreInitStmt() const { return PreInit; }
/// Get pre-initialization statement for the clause.
Stmt *getPreInitStmt() { return PreInit; }
/// Get capture region for the stmt in the clause.
OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }
static OMPClauseWithPreInit *get(OMPClause *C);
static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Post-update expression for the clause.
Expr *PostUpdate = nullptr;
protected:
OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
assert(get(This) && "get is not tuned for post-update.");
}
/// Set post-update expression for the clause.
void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
public:
/// Get post-update expression for the clause.
const Expr *getPostUpdateExpr() const { return PostUpdate; }
/// Get post-update expression for the clause.
Expr *getPostUpdateExpr() { return PostUpdate; }
static OMPClauseWithPostUpdate *get(OMPClause *C);
static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
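// Illustrative sketch (not part of the original header): consumers run the
// captured pre-init statement before entering the region and the post-update
// expression after leaving it. Roughly, with a 'const OMPClause *C'
// (EmitStmt/EmitIgnoredExpr stand in for the real CodeGen entry points):
//
//   if (const auto *PI = OMPClauseWithPreInit::get(C))
//     if (const Stmt *S = PI->getPreInitStmt())
//       EmitStmt(S);                      // before the region
//   // ... emit the associated region ...
//   if (const auto *PU = OMPClauseWithPostUpdate::get(C))
//     if (const Expr *E = PU->getPostUpdateExpr())
//       EmitIgnoredExpr(E);               // after the region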
/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Number of variables in the list.
unsigned NumVars;
protected:
/// \brief Build a clause with \a N variables
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
: OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}
/// \brief Fetches list of variables associated with this clause.
MutableArrayRef<Expr *> getVarRefs() {
return MutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
}
/// \brief Sets the list of variables for this clause.
void setVarRefs(ArrayRef<Expr *> VL) {
assert(VL.size() == NumVars &&
"Number of variables is not the same as the preallocated buffer");
std::copy(VL.begin(), VL.end(),
static_cast<T *>(this)->template getTrailingObjects<Expr *>());
}
public:
using varlist_iterator = MutableArrayRef<Expr *>::iterator;
using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
using varlist_range = llvm::iterator_range<varlist_iterator>;
using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVarRefs().begin(); }
varlist_iterator varlist_end() { return getVarRefs().end(); }
varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
varlist_const_iterator varlist_end() const { return getVarRefs().end(); }
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Fetches list of all variables in the clause.
ArrayRef<const Expr *> getVarRefs() const {
return llvm::makeArrayRef(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
NumVars);
}
};
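// Illustrative sketch (not part of the original header): given any derived
// variable-list clause, e.g. an OMPPrivateClause *PC (declared further below),
// the varlists() range is the usual way to walk the variable references:
//
//   for (const Expr *Ref : PC->varlists())
//     Ref->dump(); // one reference per variable named in the clause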
/// \brief This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Condition of the 'if' clause.
Stmt *Condition = nullptr;
/// \brief Location of ':' (if any).
SourceLocation ColonLoc;
/// \brief Directive name modifier for the clause.
OpenMPDirectiveKind NameModifier = OMPD_unknown;
/// \brief Name modifier location.
SourceLocation NameModifierLoc;
/// \brief Set condition.
void setCondition(Expr *Cond) { Condition = Cond; }
/// \brief Set directive name modifier for the clause.
void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }
/// \brief Set location of directive name modifier for the clause.
void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }
/// \brief Set location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// \brief Build 'if' clause with condition \a Cond.
///
/// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
/// \param Cond Condition of the clause.
/// \param HelperCond Helper condition for the clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param NameModifierLoc Location of directive name modifier.
/// \param ColonLoc [OpenMP 4.1] Location of ':'.
/// \param EndLoc Ending location of the clause.
OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation NameModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc)
: OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// \brief Build an empty clause.
OMPIfClause()
: OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
/// \brief Return directive name modifier associated with the clause.
OpenMPDirectiveKind getNameModifier() const { return NameModifier; }
/// \brief Return the location of directive name modifier.
SourceLocation getNameModifierLoc() const { return NameModifierLoc; }
child_range children() { return child_range(&Condition, &Condition + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_if;
}
};
/// \brief This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Condition of the 'final' clause.
Stmt *Condition = nullptr;
/// \brief Set condition.
void setCondition(Expr *Cond) { Condition = Cond; }
public:
/// \brief Build 'final' clause with condition \a Cond.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Cond Condition of the clause.
/// \param EndLoc Ending location of the clause.
OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
Condition(Cond) {}
/// \brief Build an empty clause.
OMPFinalClause()
: OMPClause(OMPC_final, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
child_range children() { return child_range(&Condition, &Condition + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_final;
}
};
/// \brief This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Number of threads specified in the 'num_threads' clause.
Stmt *NumThreads = nullptr;
/// \brief Set the number-of-threads expression.
void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }
public:
/// \brief Build 'num_threads' clause with expression \a NumThreads.
///
/// \param NumThreads Number of threads for the construct.
/// \param HelperNumThreads Helper Number of threads for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_num_threads, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
NumThreads(NumThreads) {
setPreInitStmt(HelperNumThreads, CaptureRegion);
}
/// \brief Build an empty clause.
OMPNumThreadsClause()
: OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns number of threads.
Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }
child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_threads;
}
};
/// \brief This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Safe iteration space distance.
Stmt *Safelen = nullptr;
/// \brief Set safelen.
void setSafelen(Expr *Len) { Safelen = Len; }
public:
/// \brief Build 'safelen' clause.
///
/// \param Len Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
Safelen(Len) {}
/// \brief Build an empty clause.
explicit OMPSafelenClause()
: OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return safe iteration space distance.
Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }
child_range children() { return child_range(&Safelen, &Safelen + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_safelen;
}
};
/// \brief This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Preferred number of iterations executed concurrently.
Stmt *Simdlen = nullptr;
/// \brief Set simdlen.
void setSimdlen(Expr *Len) { Simdlen = Len; }
public:
/// \brief Build 'simdlen' clause.
///
/// \param Len Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
Simdlen(Len) {}
/// \brief Build an empty clause.
explicit OMPSimdlenClause()
: OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return the preferred number of iterations.
Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }
child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_simdlen;
}
};
/// \brief This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression; it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Number of for-loops.
Stmt *NumForLoops = nullptr;
/// \brief Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// \brief Build 'collapse' clause.
///
/// \param Num Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num) {}
/// \brief Build an empty clause.
explicit OMPCollapseClause()
: OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_collapse;
}
};
/// \brief This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'default' clause.
OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;
/// \brief Start location of the kind in source code.
SourceLocation KindKwLoc;
/// \brief Set kind of the clause.
///
/// \param K Argument of clause.
void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }
/// \brief Set argument location.
///
/// \param KLoc Argument location.
void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// \brief Build 'default' clause with argument \a A ('none' or 'shared').
///
/// \param A Argument of the clause ('none' or 'shared').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
Kind(A), KindKwLoc(ALoc) {}
/// \brief Build an empty clause.
OMPDefaultClause()
: OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns kind of the clause.
OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }
/// \brief Returns location of clause kind.
SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_default;
}
};
/// \brief This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'proc_bind' clause.
OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown;
/// \brief Start location of the kind in source code.
SourceLocation KindKwLoc;
/// \brief Set kind of the clause.
///
/// \param K Kind of clause.
void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }
/// \brief Set clause kind location.
///
/// \param KLoc Kind location.
void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or
/// 'spread').
///
/// \param A Argument of the clause ('master', 'close' or 'spread').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
Kind(A), KindKwLoc(ALoc) {}
/// \brief Build an empty clause.
OMPProcBindClause()
: OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns kind of the clause.
OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }
/// \brief Returns location of clause kind.
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_proc_bind;
}
};
/// \brief This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'schedule' clause.
OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;
/// \brief Modifiers for 'schedule' clause.
enum {FIRST, SECOND, NUM_MODIFIERS};
OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];
/// \brief Locations of modifiers.
SourceLocation ModifiersLoc[NUM_MODIFIERS];
/// \brief Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// \brief Location of ',' (if any).
SourceLocation CommaLoc;
/// \brief Chunk size.
Expr *ChunkSize = nullptr;
/// \brief Set schedule kind.
///
/// \param K Schedule kind.
void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
/// \brief Set the first schedule modifier.
///
/// \param M Schedule modifier.
void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[FIRST] = M;
}
/// \brief Set the second schedule modifier.
///
/// \param M Schedule modifier.
void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[SECOND] = M;
}
/// \brief Set location of the first schedule modifier.
void setFirstScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[FIRST] = Loc;
}
/// \brief Set location of the second schedule modifier.
void setSecondScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[SECOND] = Loc;
}
/// \brief Set a schedule modifier, filling the first unset slot.
///
/// \param M Schedule modifier.
void setScheduleModifer(OpenMPScheduleClauseModifier M) {
if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
Modifiers[FIRST] = M;
else {
assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
Modifiers[SECOND] = M;
}
}
/// \brief Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// \brief Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// \brief Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size
/// expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind Schedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
/// \param M1 The first modifier applied to 'schedule' clause.
/// \param M1Loc Location of the first modifier.
/// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier.
OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, Stmt *HelperChunkSize,
OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
: OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
Modifiers[FIRST] = M1;
Modifiers[SECOND] = M2;
ModifiersLoc[FIRST] = M1Loc;
ModifiersLoc[SECOND] = M2Loc;
}
/// \brief Build an empty clause.
explicit OMPScheduleClause()
: OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {
Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
}
/// \brief Get kind of the clause.
OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
/// \brief Get the first modifier of the clause.
OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
return Modifiers[FIRST];
}
/// \brief Get the second modifier of the clause.
OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
return Modifiers[SECOND];
}
/// \brief Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// \brief Get kind location.
SourceLocation getScheduleKindLoc() { return KindLoc; }
/// \brief Get the first modifier location.
SourceLocation getFirstScheduleModifierLoc() const {
return ModifiersLoc[FIRST];
}
/// \brief Get the second modifier location.
SourceLocation getSecondScheduleModifierLoc() const {
return ModifiersLoc[SECOND];
}
/// \brief Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }
/// \brief Get chunk size.
Expr *getChunkSize() { return ChunkSize; }
/// \brief Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_schedule;
}
};
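// Illustrative sketch (not part of the original header): unpacking a
// 'schedule' clause such as '#pragma omp for schedule(monotonic: static, 3)'
// from an OMPScheduleClause *SC:
//
//   OpenMPScheduleClauseKind K = SC->getScheduleKind();
//       // K == OMPC_SCHEDULE_static
//   OpenMPScheduleClauseModifier M1 = SC->getFirstScheduleModifier();
//       // M1 == OMPC_SCHEDULE_MODIFIER_monotonic
//   if (Expr *Chunk = SC->getChunkSize())
//     Chunk->dump(); // the '3' expression; null when no chunk is given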
/// \brief This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Number of for-loops.
Stmt *NumForLoops = nullptr;
/// \brief Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// \brief Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num) {}
/// \brief Build an empty clause.
explicit OMPOrderedClause()
: OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_ordered;
}
};
/// \brief This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
/// \brief Build 'nowait' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_nowait, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPNowaitClause()
: OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_nowait;
}
};
/// \brief This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
/// \brief Build 'untied' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_untied, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPUntiedClause()
: OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_untied;
}
};
/// \brief This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
/// \brief Build 'mergeable' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPMergeableClause()
: OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_mergeable;
}
};
/// \brief This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
/// \brief Build 'read' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_read, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_read;
}
};
/// \brief This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
/// \brief Build 'write' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_write, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPWriteClause()
: OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_write;
}
};
/// \brief This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
class OMPUpdateClause : public OMPClause {
public:
/// \brief Build 'update' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_update, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPUpdateClause()
: OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_update;
}
};
/// \brief This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
/// \brief Build 'capture' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_capture, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPCaptureClause()
: OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_capture;
}
};
/// \brief This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
/// \brief Build 'seq_cst' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPSeqCstClause()
: OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_seq_cst;
}
};
/// \brief This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
: public OMPVarListClause<OMPPrivateClause>,
private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPPrivateClause(unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// \brief Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// \brief Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PrivateVL List of references to private copies with initializers.
static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PrivateVL);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_private;
}
};
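// Illustrative note (not part of the original header): for N variables the
// tail allocation of OMPPrivateClause holds two consecutive Expr* arrays,
//
//   [ VarRefs[0..N) | PrivateCopies[0..N) ]
//
// which is why getPrivateCopies() starts at varlist_end(). Paired iteration
// is then straightforward (handlePair is a hypothetical helper):
//
//   auto Copy = PC->private_copies().begin();
//   for (const Expr *Ref : PC->varlists())
//     handlePair(Ref, *Copy++); // original variable and its private copy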
/// \brief This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
: public OMPVarListClause<OMPFirstprivateClause>,
public OMPClauseWithPreInit,
private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPreInit(this) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPreInit(this) {}
/// \brief Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// \brief Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Sets the list of references to initializer variables for new
/// private variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// \brief Gets the list of references to initializer variables for new
/// private variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
/// \param PrivateVL List of references to private copies with initializers.
/// \param InitVL List of references to auto generated variables used for
/// initialization of a single array element. Used if firstprivate variable is
/// of array type.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
static OMPFirstprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL, Stmt *PreInit);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_firstprivate;
}
};
/// \brief This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
// There are 4 additional tail-allocated arrays at the end of the class:
// 1. Contains list of pseudo variables with the default initialization for
// each non-firstprivate variables. Used in codegen for initialization of
// lastprivate copies.
// 2. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents private variables
// (for arrays, single array element).
// 3. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents original variables
// (for arrays, single array element).
// 4. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of final assignment performed by the
// lastprivate clause.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// \brief Get the list of helper expressions for initialization of private
/// copies for lastprivate variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent original variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign private copy of the variable to original variable.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// private variables (for arrays, single array element).
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// original variables (for arrays, single array element).
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// lastprivate clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
Stmt *PreInit, Expr *PostUpdate);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// \brief Set list of helper expressions, required for generation of private
/// copies of original lastprivate variables.
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);
helper_expr_const_range private_copies() const {
return helper_expr_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_range private_copies() {
return helper_expr_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_lastprivate;
}
};
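// Illustrative note (not part of the original header): for N variables the
// tail allocation of OMPLastprivateClause is five consecutive Expr* arrays,
//
//   [ VarRefs | PrivateCopies | SrcExprs | DstExprs | AssignmentOps ]
//
// each of length N, which matches the chained getX().end() offsets used by
// the accessors above.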
/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
: public OMPVarListClause<OMPSharedClause>,
private llvm::TrailingObjects<OMPSharedClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPSharedClause(unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_shared;
}
};
/// \brief This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
: public OMPVarListClause<OMPReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Location of ':'.
SourceLocation ColonLoc;
/// \brief Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// \brief Name of custom operator.
DeclarationNameInfo NameInfo;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPReductionClause(unsigned N)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
OMPClauseWithPostUpdate(this) {}
/// \brief Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// \brief Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// \brief Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the private copies of the reduction
/// variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// \brief Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the LHS expressions in the final
/// reduction expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// \brief Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the RHS expressions in the final
/// reduction expression performed by the reduction clause. Also, variables in
/// these expressions are used for proper initialization of reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// \brief Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// \brief Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction calls that calculate the new value from the
/// source helper expressions and store it in the destination helper
/// expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// \brief Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
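// A rough sketch (not the exact expressions Sema builds): for
// 'reduction(+ : a)' the helper lists pair up per variable roughly as
//   Privates[i]     -> a.private
//   LHSExprs[i]     -> a.original
//   RHSExprs[i]     -> a.private
//   ReductionOps[i] -> a.original = a.original + a.private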
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of the
/// assignment operation required for the reduction clause. This list
/// represents the LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of the
/// assignment operation required for the reduction clause. This list
/// represents the RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represent the reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CustomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
/// \brief Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// \brief Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// \brief Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_reduction;
}
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
: public OMPVarListClause<OMPTaskReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
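// As with OMPReductionClause, the tail-allocated storage (inferred from the
// getters below) is laid out as:
//
// { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
//   ReductionOps[]; }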
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPTaskReductionClause(unsigned N)
: OMPVarListClause<OMPTaskReductionClause>(
OMPC_task_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent the private copies of the reduction variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent the LHS expressions in the final reduction
/// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent the RHS expressions in the final reduction
/// expression performed by the reduction clause. Also, variables in these
/// expressions are used for proper initialization of reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction calls that calculate the new value from the
/// source helper expressions and store it in the destination helper
/// expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of the
/// assignment operation required for the reduction clause. This list
/// represents the LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of the
/// assignment operation required for the reduction clause. This list
/// represents the RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represent the reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CustomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPTaskReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_task_reduction;
}
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
: public OMPVarListClause<OMPInReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
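// The tail-allocated storage (inferred from the getters below) carries one
// list more than the other reduction clauses:
//
// { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
//   ReductionOps[]; TaskgroupDescriptors[]; }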
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPInReductionClause(unsigned N)
: OMPVarListClause<OMPInReductionClause>(
OMPC_in_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent the private copies of the reduction variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent the LHS expressions in the final reduction
/// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent the RHS expressions in the final reduction
/// expression performed by the reduction clause. Also, variables in these
/// expressions are used for proper initialization of reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction calls that calculate the new value from the
/// source helper expressions and store it in the destination helper
/// expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
/// Set list of helper reduction taskgroup descriptors.
void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction taskgroup descriptors.
MutableArrayRef<Expr *> getTaskgroupDescriptors() {
return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
ArrayRef<const Expr *> getTaskgroupDescriptors() const {
return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of the
/// assignment operation required for the reduction clause. This list
/// represents the LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of the
/// assignment operation required for the reduction clause. This list
/// represents the RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represent the reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CustomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param TaskgroupDescriptors List of helper taskgroup descriptors for
/// corresponding items in parent taskgroup task_reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPInReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_const_range taskgroup_descriptors() const {
return helper_expr_const_range(getTaskgroupDescriptors().begin(),
getTaskgroupDescriptors().end());
}
helper_expr_range taskgroup_descriptors() {
return helper_expr_range(getTaskgroupDescriptors().begin(),
getTaskgroupDescriptors().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_in_reduction;
}
};
/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
: public OMPVarListClause<OMPLinearClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;
/// \brief Location of linear modifier if any.
SourceLocation ModifierLoc;
/// \brief Location of ':'.
SourceLocation ColonLoc;
/// \brief Sets the linear step for clause.
void setStep(Expr *Step) { *(getFinals().end()) = Step; }
/// \brief Sets the expression to calculate linear step for clause.
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
/// \brief Build 'linear' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars),
OMPClauseWithPostUpdate(this) {}
/// \brief Gets the list of private copies of the linear variables.
///
/// There are NumVars expressions with private copies allocated after the
/// varlist; they are followed by NumVars initial values, then by NumVars
/// update expressions (used to update the linear variable's value on the
/// current iteration), and finally by NumVars final expressions (used to
/// calculate the linear variable's value after the loop body). After these
/// lists there are 2 helper expressions - the linear step and a helper to
/// calculate it before the loop body (used when the linear step is not
/// constant):
///
/// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
///   Finals[]; Step; CalcStep; }
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// \brief Gets the list of update expressions for linear variables.
MutableArrayRef<Expr *> getUpdates() {
return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
return llvm::makeArrayRef(getInits().end(), varlist_size());
}
/// \brief Gets the list of final update expressions for linear variables.
MutableArrayRef<Expr *> getFinals() {
return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}
/// \brief Sets the list of the copies of original linear variables.
/// \param PL List of expressions.
void setPrivates(ArrayRef<Expr *> PL);
/// \brief Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);
public:
/// \brief Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
Stmt *PreInit, Expr *PostUpdate);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// \brief Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
/// \brief Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }
/// \brief Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// \brief Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// \brief Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// \brief Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Returns linear step.
Expr *getStep() { return *(getFinals().end()); }
/// \brief Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }
/// \brief Returns expression to calculate linear step.
Expr *getCalcStep() { return *(getFinals().end() + 1); }
/// \brief Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
/// \brief Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);
/// \brief Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);
using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;
privates_range privates() {
return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
return privates_const_range(getPrivates().begin(), getPrivates().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;
updates_range updates() {
return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
return updates_const_range(getUpdates().begin(), getUpdates().end());
}
using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;
finals_range finals() {
return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
return finals_const_range(getFinals().begin(), getFinals().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_linear;
}
};
/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
: public OMPVarListClause<OMPAlignedClause>,
private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
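// A single extra expression (the alignment) is tail-allocated immediately
// after the variable list; setAlignment()/getAlignment() below read and
// write that one slot at varlist_end().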
/// \brief Location of ':'.
SourceLocation ColonLoc;
/// \brief Sets the alignment for clause.
void setAlignment(Expr *A) { *varlist_end() = A; }
/// \brief Build 'aligned' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars) {}
public:
/// \brief Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
Expr *A);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// \brief Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// \brief Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Returns alignment.
Expr *getAlignment() { return *varlist_end(); }
/// \brief Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_aligned;
}
};
/// \brief This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
: public OMPVarListClause<OMPCopyinClause>,
private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
// Class has 3 additional tail allocated arrays:
// 1. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.
// 2. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents destinations.
// 3. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of propagation of the master thread's values
// of threadprivate variables to the local instances of those variables in
// other implicit threads.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the source expressions in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the destination expressions in the
/// final assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of the master thread's values
/// of threadprivate variables to the local instances of those variables in
/// other implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyin;
}
};
/// \brief This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
: public OMPVarListClause<OMPCopyprivateClause>,
private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
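// Like OMPCopyinClause, this class has 3 additional tail allocated arrays:
// the source expressions, the destination expressions, and the assignment
// operations
// \code
// DstExprs = SrcExprs;
// \endcode
// required for proper codegen of the final assignment performed by the
// copyprivate clause.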
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
LParenLoc, EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the source expressions in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// \brief Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the destination expressions in the
/// final assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// \brief Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// \brief Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// \brief Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyprivate;
}
};
/// \brief This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself; it can only appear as part of the
/// 'omp flush' directive. This clause is introduced to keep the original
/// structure of \a OMPExecutableDirective class and its derivatives and to
/// use the existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
: public OMPVarListClause<OMPFlushClause>,
private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_flush;
}
};
/// \brief This represents clause 'depend' in the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'depend' with the
/// variables 'a' and 'b' and dependency type 'in'.
class OMPDependClause final
: public OMPVarListClause<OMPDependClause>,
private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Dependency type (one of in, out, inout, source, sink).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
/// \brief Dependency type location.
SourceLocation DepLoc;
/// \brief Colon location.
SourceLocation ColonLoc;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
EndLoc, N) {}
/// \brief Build an empty clause.
///
/// \param N Number of variables.
explicit OMPDependClause(unsigned N)
: OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// \brief Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// \brief Set dependency type location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// \brief Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
static OMPDependClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL);
/// \brief Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);
/// \brief Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// \brief Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// \brief Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Set the loop counter value for the depend clauses with 'sink|source' kind
/// of dependency. Required for codegen.
void setCounterValue(Expr *V);
/// Get the loop counter value.
Expr *getCounterValue();
/// Get the loop counter value.
const Expr *getCounterValue() const;
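// Illustrative only: 'source' and 'sink' dependencies appear on ordered
// constructs inside doacross loops, e.g.
// \code
// #pragma omp ordered depend(source)
// #pragma omp ordered depend(sink : i - 1)
// \endcode
// For these kinds the clause carries the loop counter value accessed via
// getCounterValue() above.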
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_depend;
}
};
/// \brief This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Device number.
Stmt *Device = nullptr;
/// \brief Set the device number.
///
/// \param E Device number.
void setDevice(Expr *E) { Device = E; }
public:
/// \brief Build 'device' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper expression stored as the pre-init statement for this
/// clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Device(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// \brief Build an empty clause.
OMPDeviceClause()
: OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return device number.
Expr *getDevice() { return cast<Expr>(Device); }
/// \brief Return device number.
Expr *getDevice() const { return cast<Expr>(Device); }
child_range children() { return child_range(&Device, &Device + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_device;
}
};
/// \brief This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
/// \brief Build 'threads' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_threads, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPThreadsClause()
: OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_threads;
}
};
/// \brief This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
/// \brief Build 'simd' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_simd, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_simd;
}
};
/// \brief Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
/// Class that represents a component of a mappable expression. E.g.
/// for an expression S.a, one component is the member expression
/// associated with the field declaration 'a' and another is the declaration
/// reference expression associated with 'S'. If the expression is an array
/// subscript it may not have any associated declaration. In that case the
/// associated declaration is set to nullptr.
class MappableComponent {
/// Expression associated with the component.
Expr *AssociatedExpression = nullptr;
/// Declaration associated with the component. If the component does
/// not have a declaration (e.g. array subscripts or sections), this is set
/// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr;
public:
explicit MappableComponent() = default;
explicit MappableComponent(Expr *AssociatedExpression,
ValueDecl *AssociatedDeclaration)
: AssociatedExpression(AssociatedExpression),
AssociatedDeclaration(
AssociatedDeclaration
? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
: nullptr) {}
Expr *getAssociatedExpression() const { return AssociatedExpression; }
ValueDecl *getAssociatedDeclaration() const {
return AssociatedDeclaration;
}
};
// \brief List of components of an expression. The first one is the whole
// expression and the last one is the base expression.
using MappableExprComponentList = SmallVector<MappableComponent, 8>;
using MappableExprComponentListRef = ArrayRef<MappableComponent>;
// \brief List of all component lists associated with the same base
// declaration. E.g. if both 'S.a' and 'S.b' are mappable expressions, each
// has its own component list but the same base declaration 'S'.
using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;
protected:
// \brief Return the total number of elements in a list of component lists.
static unsigned
getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);
// \brief Return the total number of elements in a list of declarations. All
// declarations are expected to be canonical.
static unsigned
getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
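// Illustrative sketch (not part of the original header): how a mappable
// expression such as 'S.a' could be decomposed into a component list under
// the convention documented above (whole expression first, base expression
// last). All variable names below are hypothetical.
//
//   OMPClauseMappableExprCommon::MappableExprComponentList Components;
//   // Component for the member access 'S.a', tied to the field 'a'.
//   Components.emplace_back(MemberExprSA, FieldDeclA);
//   // Component for the base reference 'S', tied to the declaration of 'S'.
//   Components.emplace_back(DeclRefExprS, VarDeclS);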
/// \brief This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and
/// 'from' in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
public OMPClauseMappableExprCommon {
friend class OMPClauseReader;
/// \brief Number of unique declarations in this clause.
unsigned NumUniqueDeclarations;
/// \brief Number of component lists in this clause.
unsigned NumComponentLists;
/// \brief Total number of components in this clause.
unsigned NumComponents;
protected:
/// \brief Build a clause for \a NumUniqueDeclarations declarations, \a
/// NumComponentLists total component lists, and \a NumComponents total
/// components.
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause - one
/// list for each expression in the clause.
/// \param NumComponents Total number of expression components in the clause.
OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
NumUniqueDeclarations(NumUniqueDeclarations),
NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
/// \brief Get the unique declarations that are in the trailing objects of the
/// class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
return MutableArrayRef<ValueDecl *>(
static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// \brief Get the unique declarations that are in the trailing objects of the
/// class.
ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
return ArrayRef<ValueDecl *>(
static_cast<const T *>(this)
->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// \brief Set the unique declarations that are in the trailing objects of the
/// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
assert(UDs.size() == NumUniqueDeclarations &&
"Unexpected amount of unique declarations.");
std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
}
/// \brief Get the number of lists per declaration that are in the trailing
/// objects of the class.
MutableArrayRef<unsigned> getDeclNumListsRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// \brief Get the number of lists per declaration that are in the trailing
/// objects of the class.
ArrayRef<unsigned> getDeclNumListsRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// \brief Set the number of lists per declaration that are in the trailing
/// objects of the class.
void setDeclNumLists(ArrayRef<unsigned> DNLs) {
assert(DNLs.size() == NumUniqueDeclarations &&
"Unexpected amount of list numbers.");
std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
}
/// \brief Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
MutableArrayRef<unsigned> getComponentListSizesRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// \brief Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
ArrayRef<unsigned> getComponentListSizesRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// \brief Set the cumulative component lists sizes that are in the trailing
/// objects of the class.
void setComponentListSizes(ArrayRef<unsigned> CLSs) {
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of component lists.");
std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
}
/// \brief Get the components that are in the trailing objects of the class.
MutableArrayRef<MappableComponent> getComponentsRef() {
return MutableArrayRef<MappableComponent>(
static_cast<T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// \brief Get the components that are in the trailing objects of the class.
ArrayRef<MappableComponent> getComponentsRef() const {
return ArrayRef<MappableComponent>(
static_cast<const T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// \brief Set the components that are in the trailing objects of the class.
/// This requires the list sizes so that it can also fill the original
/// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
ArrayRef<unsigned> CLSs) {
assert(Components.size() == NumComponents &&
"Unexpected amount of components.");
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of list sizes.");
std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
}
/// \brief Fill the clause information from the list of declarations and
/// associated component lists.
void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
// Perform some checks to make sure the data sizes are consistent with the
// information available when the clause was created.
assert(getUniqueDeclarationsTotalNumber(Declarations) ==
NumUniqueDeclarations &&
"Unexpected number of mappable expression info entries!");
assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
"Unexpected total number of components!");
assert(Declarations.size() == ComponentLists.size() &&
"Declaration and component lists size is not consistent!");
assert(Declarations.size() == NumComponentLists &&
"Unexpected declaration and component lists size!");
// Organize the components by declaration and retrieve the original
// expression. Original expressions are always the first component of the
// mappable component list.
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
ComponentListMap;
{
auto CI = ComponentLists.begin();
for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
++DI, ++CI) {
assert(!CI->empty() && "Invalid component list!");
ComponentListMap[*DI].push_back(*CI);
}
}
// Iterators of the target storage.
auto UniqueDeclarations = getUniqueDeclsRef();
auto UDI = UniqueDeclarations.begin();
auto DeclNumLists = getDeclNumListsRef();
auto DNLI = DeclNumLists.begin();
auto ComponentListSizes = getComponentListSizesRef();
auto CLSI = ComponentListSizes.begin();
auto Components = getComponentsRef();
auto CI = Components.begin();
// Variable to compute the accumulation of the number of components.
unsigned PrevSize = 0u;
// Scan all the declarations and associated component lists.
for (auto &M : ComponentListMap) {
// The declaration.
auto *D = M.first;
// The component lists.
auto CL = M.second;
// Initialize the entry.
*UDI = D;
++UDI;
*DNLI = CL.size();
++DNLI;
// Obtain the cumulative sizes and concatenate all the components in the
// reserved storage.
for (auto C : CL) {
// Accumulate with the previous size.
PrevSize += C.size();
// Save the size.
*CLSI = PrevSize;
++CLSI;
// Append components after the current components iterator.
CI = std::copy(C.begin(), C.end(), CI);
}
}
}
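// Storage layout sketch (illustrative): assume the clause was built for the
// expressions 'S.a' and 'S.b', both based on the declaration 'S', with
// component lists of sizes 2 and 2. setClauseInfo() above would then fill
// the trailing storage as follows:
//
//   getUniqueDeclsRef()        == { S }     // one unique declaration
//   getDeclNumListsRef()       == { 2 }     // two lists belong to 'S'
//   getComponentListSizesRef() == { 2, 4 }  // cumulative list sizes
//   getComponentsRef()         == { <2 components of 'S.a'>,
//                                   <2 components of 'S.b'> }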
public:
/// \brief Return the number of unique base declarations in this clause.
unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
/// \brief Return the number of lists derived from the clause expressions.
unsigned getTotalComponentListNum() const { return NumComponentLists; }
/// \brief Return the total number of components in all lists derived from the
/// clause.
unsigned getTotalComponentsNum() const { return NumComponents; }
/// \brief Iterator that browses the components by list. It also allows
/// browsing the components of a single declaration.
class const_component_lists_iterator
: public llvm::iterator_adaptor_base<
const_component_lists_iterator,
MappableExprComponentListRef::const_iterator,
std::forward_iterator_tag, MappableComponent, ptrdiff_t,
MappableComponent, MappableComponent> {
// The declaration the iterator currently refers to.
ArrayRef<ValueDecl *>::iterator DeclCur;
// The list number associated with the current declaration.
ArrayRef<unsigned>::iterator NumListsCur;
// Remaining lists for the current declaration.
unsigned RemainingLists = 0;
// The cumulative size of the previous list, or zero if there is no previous
// list.
unsigned PrevListSize = 0;
// The cumulative sizes of the current list - it will delimit the remaining
// range of interest.
ArrayRef<unsigned>::const_iterator ListSizeCur;
ArrayRef<unsigned>::const_iterator ListSizeEnd;
// Iterator to the end of the components storage.
MappableExprComponentListRef::const_iterator End;
public:
/// \brief Construct an iterator that scans all lists.
explicit const_component_lists_iterator(
ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator::iterator_adaptor_base(
Components.begin()),
DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
ListSizeCur(CumulativeListSizes.begin()),
ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
assert(UniqueDecls.size() == DeclsListNum.size() &&
"Inconsistent number of declarations and list sizes!");
if (!DeclsListNum.empty())
RemainingLists = *NumListsCur;
}
/// \brief Construct an iterator that scans lists for a given declaration \a
/// Declaration.
explicit const_component_lists_iterator(
const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator(UniqueDecls, DeclsListNum,
CumulativeListSizes, Components) {
// Look for the desired declaration. While we are looking for it, we
// update the state so that we know the component where a given list
// starts.
for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
if (*DeclCur == Declaration)
break;
assert(*NumListsCur > 0 && "No lists associated with declaration??");
// Skip the lists associated with the current declaration, but save the
// last list size that was skipped.
std::advance(ListSizeCur, *NumListsCur - 1);
PrevListSize = *ListSizeCur;
++ListSizeCur;
}
// If we didn't find any declaration, advance the iterator to after the
// last component and set remaining lists to zero.
if (ListSizeCur == CumulativeListSizes.end()) {
this->I = End;
RemainingLists = 0u;
return;
}
// Set the remaining lists with the total number of lists of the current
// declaration.
RemainingLists = *NumListsCur;
// Adjust the list size end iterator to the end of the relevant range.
ListSizeEnd = ListSizeCur;
std::advance(ListSizeEnd, RemainingLists);
// Given that the list sizes are cumulative, the index of the component
// that starts the list is the size of the previous list.
std::advance(this->I, PrevListSize);
}
// Return the array with the current list. The sizes are cumulative, so the
// array size is the difference between the current size and the previous
// one.
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator*() const {
assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
return std::make_pair(
*DeclCur,
MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
}
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator->() const {
return **this;
}
// Skip the components of the current list.
const_component_lists_iterator &operator++() {
assert(ListSizeCur != ListSizeEnd && RemainingLists &&
"Invalid iterator!");
// If we don't have more lists, just skip all the components. Otherwise,
// advance the iterator by the number of components in the current list.
if (std::next(ListSizeCur) == ListSizeEnd) {
this->I = End;
RemainingLists = 0;
} else {
std::advance(this->I, *ListSizeCur - PrevListSize);
PrevListSize = *ListSizeCur;
// We are done with a declaration, move to the next one.
if (!(--RemainingLists)) {
++DeclCur;
++NumListsCur;
RemainingLists = *NumListsCur;
assert(RemainingLists && "No lists in the following declaration??");
}
}
++ListSizeCur;
return *this;
}
};
using const_component_lists_range =
llvm::iterator_range<const_component_lists_iterator>;
/// \brief Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
return const_component_lists_iterator(
getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
getComponentsRef());
}
const_component_lists_iterator component_lists_end() const {
return const_component_lists_iterator(
ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
MappableExprComponentListRef(getComponentsRef().end(),
getComponentsRef().end()));
}
const_component_lists_range component_lists() const {
return {component_lists_begin(), component_lists_end()};
}
/// \brief Iterators for component lists associated with the provided
/// declaration.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
return const_component_lists_iterator(
VD, getUniqueDeclsRef(), getDeclNumListsRef(),
getComponentListSizesRef(), getComponentsRef());
}
const_component_lists_iterator decl_component_lists_end() const {
return component_lists_end();
}
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
return {decl_component_lists_begin(VD), decl_component_lists_end()};
}
/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;
const_all_decls_range all_decls() const {
auto A = getUniqueDeclsRef();
return const_all_decls_range(A.begin(), A.end());
}
using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
using const_all_num_lists_range =
llvm::iterator_range<const_all_num_lists_iterator>;
const_all_num_lists_range all_num_lists() const {
auto A = getDeclNumListsRef();
return const_all_num_lists_range(A.begin(), A.end());
}
using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
using const_all_lists_sizes_range =
llvm::iterator_range<const_all_lists_sizes_iterator>;
const_all_lists_sizes_range all_lists_sizes() const {
auto A = getComponentListSizesRef();
return const_all_lists_sizes_range(A.begin(), A.end());
}
using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
using const_all_components_range =
llvm::iterator_range<const_all_components_iterator>;
const_all_components_range all_components() const {
auto A = getComponentsRef();
return const_all_components_range(A.begin(), A.end());
}
};
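// Usage sketch (illustrative, not part of the original header): traversing
// the component lists of a clause 'C' that derives from
// OMPMappableExprListClause, e.g. an OMPMapClause. 'VD' is a hypothetical
// ValueDecl of interest.
//
//   for (const auto &L : C->component_lists()) {
//     const ValueDecl *BaseDecl = L.first;          // base declaration
//     MappableExprComponentListRef Components = L.second;
//     for (const auto &MC : Components)
//       (void)MC.getAssociatedExpression();         // visit each component
//   }
//
//   // Restrict the traversal to the lists of a single declaration:
//   for (const auto &L : C->decl_component_lists(VD))
//     (void)L;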
/// \brief This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
private llvm::TrailingObjects<
OMPMapClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// \brief Map type modifier for the 'map' clause.
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
/// \brief Map type for the 'map' clause.
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
/// \brief Is this an implicit map type or not.
bool MapTypeIsImplicit = false;
/// \brief Location of the map type.
SourceLocation MapLoc;
/// \brief Colon location.
SourceLocation ColonLoc;
/// \brief Build a clause for \a NumVars listed expressions, \a
/// NumUniqueDeclarations declarations, \a NumComponentLists total component
/// lists, and \a NumComponents total expression components.
///
/// \param MapTypeModifier Map type modifier.
/// \param MapType Map type.
/// \param MapTypeIsImplicit Map type is inferred implicitly.
/// \param MapLoc Location of the map type.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
SourceLocation MapLoc, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_map, StartLoc, LParenLoc, EndLoc,
NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents),
MapTypeModifier(MapTypeModifier), MapType(MapType),
MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPMapClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_map, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
/// \brief Set type modifier for the clause.
///
/// \param T Type Modifier for the clause.
void setMapTypeModifier(OpenMPMapClauseKind T) { MapTypeModifier = T; }
/// \brief Set type for the clause.
///
/// \param T Type for the clause.
void setMapType(OpenMPMapClauseKind T) { MapType = T; }
/// \brief Set type location.
///
/// \param TLoc Type location.
void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
/// \brief Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// \brief Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expressions used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param TypeModifier Map type modifier.
/// \param Type Map type.
/// \param TypeIsImplicit Map type is inferred implicitly.
/// \param TypeLoc Location of the map type.
static OMPMapClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
OpenMPMapClauseKind TypeModifier,
OpenMPMapClauseKind Type, bool TypeIsImplicit,
SourceLocation TypeLoc);
/// \brief Creates an empty clause with the place for \a NumVars original
/// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
/// lists, and \a NumComponents expression components.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPMapClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
/// \brief Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
/// \brief Is this an implicit map type?
/// We have to capture 'IsMapTypeImplicit' from the parser for more
/// informative error messages. It helps distinguish map(r) from
/// map(tofrom: r), which is important to print more helpful error
/// messages for some target directives.
bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
/// \brief Fetches the map type modifier for the clause.
OpenMPMapClauseKind getMapTypeModifier() const LLVM_READONLY {
return MapTypeModifier;
}
/// \brief Fetches location of clause mapping kind.
SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
/// \brief Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
child_range children() {
return child_range(
reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_map;
}
};
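// Construction sketch (illustrative): creating a 'map' clause through the
// factory declared above, e.g. for 'map(always, tofrom: ...)'. All argument
// values are hypothetical placeholders.
//
//   OMPMapClause *MC = OMPMapClause::Create(
//       Context, StartLoc, LParenLoc, EndLoc, Vars, Declarations,
//       ComponentLists, /*TypeModifier=*/OMPC_MAP_always,
//       /*Type=*/OMPC_MAP_tofrom, /*TypeIsImplicit=*/false, MapLoc);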
/// \brief This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief NumTeams number.
Stmt *NumTeams = nullptr;
/// \brief Set the NumTeams number.
///
/// \param E NumTeams number.
void setNumTeams(Expr *E) { NumTeams = E; }
public:
/// \brief Build 'num_teams' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), NumTeams(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// \brief Build an empty clause.
OMPNumTeamsClause()
: OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return NumTeams number.
Expr *getNumTeams() { return cast<Expr>(NumTeams); }
/// \brief Return NumTeams number.
Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_teams;
}
};
/// \brief This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief ThreadLimit number.
Stmt *ThreadLimit = nullptr;
/// \brief Set the ThreadLimit number.
///
/// \param E ThreadLimit number.
void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
/// \brief Build 'thread_limit' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPThreadLimitClause(Expr *E, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// \brief Build an empty clause.
OMPThreadLimitClause()
: OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return ThreadLimit number.
Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
/// \brief Return ThreadLimit number.
Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_thread_limit;
}
};
/// \brief This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Priority number.
Stmt *Priority = nullptr;
/// \brief Set the Priority number.
///
/// \param E Priority number.
void setPriority(Expr *E) { Priority = E; }
public:
/// \brief Build 'priority' clause.
///
/// \param E Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
Priority(E) {}
/// \brief Build an empty clause.
OMPPriorityClause()
: OMPClause(OMPC_priority, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return Priority number.
Expr *getPriority() { return cast<Expr>(Priority); }
/// \brief Return Priority number.
Expr *getPriority() const { return cast<Expr>(Priority); }
child_range children() { return child_range(&Priority, &Priority + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_priority;
}
};
/// \brief This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Grainsize expression of the 'grainsize' clause.
Stmt *Grainsize = nullptr;
/// \brief Set the grainsize expression.
void setGrainsize(Expr *Size) { Grainsize = Size; }
public:
/// \brief Build 'grainsize' clause.
///
/// \param Size Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
Grainsize(Size) {}
/// \brief Build an empty clause.
explicit OMPGrainsizeClause()
: OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return the grainsize expression.
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_grainsize;
}
};
/// \brief This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
/// \brief Build 'nogroup' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
OMPNogroupClause()
: OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_nogroup;
}
};
/// \brief This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Number of tasks expression of the 'num_tasks' clause.
Stmt *NumTasks = nullptr;
/// \brief Set the number of tasks expression.
void setNumTasks(Expr *Size) { NumTasks = Size; }
public:
/// \brief Build 'num_tasks' clause.
///
/// \param Size Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumTasks(Size) {}
/// \brief Build an empty clause.
explicit OMPNumTasksClause()
: OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return the number of tasks expression.
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_tasks;
}
};
/// \brief This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Hint expression of the 'hint' clause.
Stmt *Hint = nullptr;
/// \brief Set hint expression.
void setHint(Expr *H) { Hint = H; }
public:
/// \brief Build 'hint' clause with expression \a Hint.
///
/// \param Hint Hint expression.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
Hint(Hint) {}
/// \brief Build an empty clause.
OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns the hint expression.
Expr *getHint() const { return cast_or_null<Expr>(Hint); }
child_range children() { return child_range(&Hint, &Hint + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_hint;
}
};
/// \brief This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'dist_schedule' clause.
OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;
/// \brief Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// \brief Location of ',' (if any).
SourceLocation CommaLoc;
/// \brief Chunk size.
Expr *ChunkSize = nullptr;
/// \brief Set schedule kind.
///
/// \param K Schedule kind.
void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }
/// \brief Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// \brief Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// \brief Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// \brief Build 'dist_schedule' clause with schedule kind \a Kind and chunk
/// size expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind DistSchedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc,
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
Stmt *HelperChunkSize)
: OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
}
/// \brief Build an empty clause.
explicit OMPDistScheduleClause()
: OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// \brief Get kind of the clause.
OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }
/// \brief Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// \brief Get kind location.
SourceLocation getDistScheduleKindLoc() { return KindLoc; }
/// \brief Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }
/// \brief Get chunk size.
Expr *getChunkSize() { return ChunkSize; }
/// \brief Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_dist_schedule;
}
};
/// \brief This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Modifiers for 'defaultmap' clause.
OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;
/// \brief Location of the modifier.
SourceLocation ModifierLoc;
/// \brief A kind of the 'defaultmap' clause.
OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;
/// \brief Start location of the defaultmap kind in source code.
SourceLocation KindLoc;
/// \brief Set defaultmap kind.
///
/// \param K Defaultmap kind.
void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
/// \brief Set the defaultmap modifier.
///
/// \param M Defaultmap modifier.
void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
Modifier = M;
}
/// \brief Set location of the defaultmap modifier.
void setDefaultmapModifierLoc(SourceLocation Loc) {
ModifierLoc = Loc;
}
/// \brief Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Set defaultmap kind start location.
///
/// \param KLoc Defaultmap kind location.
void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
public:
/// \brief Build 'defaultmap' clause with defaultmap kind \a Kind.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param EndLoc Ending location of the clause.
/// \param Kind Defaultmap kind.
/// \param M The modifier applied to 'defaultmap' clause.
/// \param MLoc Location of the modifier.
OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation MLoc, SourceLocation KLoc,
SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
OpenMPDefaultmapClauseModifier M)
: OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}
/// \brief Build an empty clause.
explicit OMPDefaultmapClause()
: OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}
/// \brief Get kind of the clause.
OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
/// \brief Get the modifier of the clause.
OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
return Modifier;
}
/// \brief Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// \brief Get kind location.
SourceLocation getDefaultmapKindLoc() { return KindLoc; }
/// \brief Get the modifier location.
SourceLocation getDefaultmapModifierLoc() const {
return ModifierLoc;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_defaultmap;
}
};
/// \brief This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
private llvm::TrailingObjects<
OMPToClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPToClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_to, StartLoc, LParenLoc, EndLoc, NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPToClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_to, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// \brief Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expressions used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPToClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPToClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_to;
}
};
/// \brief This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
: public OMPMappableExprListClause<OMPFromClause>,
private llvm::TrailingObjects<
OMPFromClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// \brief Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPFromClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_from, StartLoc, LParenLoc, EndLoc,
NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPFromClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_from, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// \brief Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expressions used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPFromClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPFromClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_from;
}
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
: public OMPMappableExprListClause<OMPUseDevicePtrClause>,
private llvm::TrailingObjects<
OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPUseDevicePtrClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_use_device_ptr, StartLoc, LParenLoc,
EndLoc, NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPUseDevicePtrClause(unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_use_device_ptr, SourceLocation(),
SourceLocation(), SourceLocation(), NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return 3 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// Sets the list of references to private copies with initializers for new
/// private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for new
/// private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new private
/// variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new private
/// variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
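// Layout note (illustrative): the Expr * trailing storage of this clause
// holds three consecutive arrays of varlist_size() elements each - the
// original variables, then the private copies, then their initializers -
// which is why numTrailingObjects(OverloadToken<Expr *>) above returns
// 3 * varlist_size().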
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expressions used in the clause.
/// \param PrivateVars Expressions referring to private copies.
/// \param Inits Expressions referring to private copy initializers.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPUseDevicePtrClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> Vars,
ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPUseDevicePtrClause *CreateEmpty(const ASTContext &C,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_use_device_ptr;
}
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
: public OMPMappableExprListClause<OMPIsDevicePtrClause>,
private llvm::TrailingObjects<
OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPIsDevicePtrClause(SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_is_device_ptr, StartLoc, LParenLoc,
EndLoc, NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPIsDevicePtrClause(unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_is_device_ptr, SourceLocation(),
SourceLocation(), SourceLocation(), NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPIsDevicePtrClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this
/// clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPIsDevicePtrClause *CreateEmpty(const ASTContext &C,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_is_device_ptr;
}
};
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
Matrix.h | #pragma once
#include <Vector.h>
namespace freeaml
{
/**
* @brief @c Matrix<T> is a class suited for representing dense matrices.
*
* This class stores a matrix of elements of type @c T. It overloads the
* addition (+), subtraction (-), multiplication (*) and division (/) operators
* for supporting common matrix operations such as matrix addition, matrix
* multiplication by scalar and matrix multiplication with another matrix or a
* vector.
*
* Some commonly used mathematical operations are also provided in the class
* (e.g. determining the transpose of the matrix).
*
* Support for OpenMP was added to the functions and operators which showed a
* significant speedup when implemented using multiple threads.
*/
template<typename T>
class Matrix
{
public:
using value_type = typename Vector<T>::value_type;
using size_type = typename Vector<T>::size_type;
using reference = typename Vector<T>::reference;
using const_reference = typename Vector<T>::const_reference;
/** @brief Constructs a matrix with no elements. */
Matrix();
/**
* @brief Constructs a matrix with the contents of an initializer list of
* equally-sized initializer lists, each one representing a row of
* the matrix.
* @param init An initializer list holding initializer lists of elements of
* type @c T.
*/
Matrix(std::initializer_list<std::initializer_list<T>> init);
/**
* @brief Constructs a matrix with the elements of a vector.
* @param rows The number of matrix rows.
* @param cols The number of matrix columns.
* @param elements A vector holding the <tt>rows × cols</tt> matrix elements
* in row-major order (i.e., the elements on the first matrix row
* followed by the elements on the second matrix row and so on).
*/
Matrix(size_type rows, size_type cols, const Vector<T>& elements);
/**
* @brief Constructs a matrix with the elements of a vector.
* @param rows The number of matrix rows.
* @param cols The number of matrix columns.
* @param elements A vector holding the <tt>rows × cols</tt> matrix elements
* in row-major order (i.e., the elements on the first matrix row
* followed by the elements on the second matrix row and so on).
*/
Matrix(size_type rows, size_type cols, Vector<T>&& elements);
/**
* @brief Constructs a matrix with all elements initialized with a value.
* @param rows The number of matrix rows.
* @param cols The number of matrix columns.
* @param x The initializing value for every element of the matrix.
*/
Matrix(size_type rows, size_type cols, const T& x = T{});
/**
* @brief Copy constructor.
* @param M The matrix from which all elements will be copied.
*/
Matrix(const Matrix& M) = default;
/**
* @brief Move constructor.
* @param M The matrix from which all elements will be moved.
*/
Matrix(Matrix&& M) = default;
/**
* @brief Returns a reference to a matrix element.
* @param i The row of the matrix element.
* @param j The column of the matrix element.
* @return A reference to the element <tt>(i,j)</tt> of the matrix.
*/
reference operator()(size_type i, size_type j);
/**
* @brief Returns a const reference to a matrix element.
* @param i The row of the matrix element.
* @param j The column of the matrix element.
* @return A const reference to the element <tt>(i,j)</tt> of the matrix.
*/
const_reference operator()(size_type i, size_type j) const;
/**
* @brief Copy-assignment operator.
* @param M The matrix from which all elements will be copied.
* @return A reference to @c *this.
*/
Matrix& operator=(const Matrix& M) = default;
/**
* @brief Move-assignment operator.
* @param M The matrix from which all elements will be moved.
* @return A reference to @c *this.
*/
Matrix& operator=(Matrix&& M) = default;
/**
* @brief Equality-comparison operator.
* @param M A matrix to compare against.
* @return @c true if the matrix is equal to @c M, @c false otherwise.
*/
bool operator==(const Matrix& M) const;
/**
* @brief Inequality-comparison operator.
* @param M A matrix to compare against.
* @return @c true if the matrix is not equal to @c M, @c false otherwise.
*/
bool operator!=(const Matrix& M) const;
/**
* @brief Multiplies all elements of the matrix by a scalar.
* @param c A scalar.
* @return A reference to @c *this.
*/
Matrix& operator*=(const T& c);
/**
* @brief Divides all elements of the matrix by a scalar.
* @param c A scalar.
* @return A reference to @c *this.
*/
Matrix& operator/=(const T& c);
/**
* @brief Performs element-wise addition-assignment with another matrix.
* @param M A matrix.
* @return A reference to @c *this.
*/
Matrix& operator+=(const Matrix& M);
/**
* @brief Performs element-wise subtraction-assignment with another matrix.
* @param M A matrix.
* @return A reference to @c *this.
*/
Matrix& operator-=(const Matrix& M);
/**
* @brief Computes the transpose of the matrix.
* @return A copy of the transpose of the matrix.
*/
Matrix transpose() const;
/**
* @brief Computes the max-norm of the matrix.
* @return The magnitude of the largest-in-magnitude matrix element.
*/
T max_norm() const;
/**
* @brief Checks if the matrix has the same number of rows and columns.
* @return @c true if the matrix is square, @c false otherwise.
*/
bool is_square() const;
/**
* @brief Checks if the matrix is symmetric.
* @return @c true if the matrix is symmetric, @c false otherwise.
*/
bool is_symmetric() const;
/**
* @brief Gets the number of rows in the matrix.
* @return The number of rows in the matrix.
*/
size_type num_rows() const;
/**
* @brief Gets the number of columns in the matrix.
* @return The number of columns in the matrix.
*/
size_type num_cols() const;
/**
* @brief Checks if the matrix is empty.
* @return @c true if the matrix is empty, @c false otherwise.
*/
bool empty() const;
/**
* @brief Resizes the matrix.
* @param rows The new number of matrix rows.
* @param cols The new number of matrix columns.
* @param x The initializing value for new elements of the matrix.
*/
void resize(size_type rows, size_type cols, const T& x = T{});
/**
* @brief Clears the matrix.
*/
void clear();
/**
* @brief Returns the matrix elements as a vector (in row-major order).
* @return The elements of the matrix stored on a vector, with the first
* row elements appearing first, then the second row elements and
* so on.
*/
const Vector<T>& flatten() const;
private:
size_type rows_; /* number of matrix rows */
size_type cols_; /* number of matrix columns */
Vector<T> elements_; /* matrix elements in row-major order */
}; /* class Matrix<T> */
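/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * library); assumes only this header and <iostream>:
 *
 *   freeaml::Matrix<double> A = {{1.0, 2.0},
 *                                {3.0, 4.0}};
 *   freeaml::Matrix<double> B = A.transpose();  // B(i,j) == A(j,i)
 *   freeaml::Matrix<double> C = 2.0 * A + B;    // scalar product, then sum
 *   std::cout << C << '\n';                     // prints [[3, 7], [8, 12]]
 */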
/**
* @brief Computes the multiplication of a matrix by a scalar on the right.
* @param M A matrix.
* @param c A scalar.
* @return A copy of @c M with all elements multiplied by @c c.
*/
template<typename T>
Matrix<T> operator*(const Matrix<T>& M, const T& c);
/**
* @brief Computes the multiplication of a matrix by a scalar on the left.
* @param c A scalar.
* @param M A matrix.
* @return A copy of @c M with all elements multiplied by @c c.
*/
template<typename T>
Matrix<T> operator*(const T& c, const Matrix<T>& M);
/**
* @brief Computes the multiplication of two matrices.
* @param M1 A matrix.
* @param M2 A matrix.
* @return A matrix which is the result of multiplying @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator*(const Matrix<T>& M1, const Matrix<T>& M2);
/**
* @brief Computes the multiplication of a matrix and a vector.
* @param M A matrix.
* @param v A vector (interpreted as a column vector).
* @return A vector which is the result of multiplying @c M and @c v.
*/
template<typename T>
Vector<T> operator*(const Matrix<T>& M, const Vector<T>& v);
/**
* @brief Computes the multiplication of a vector and a matrix.
* @param v A vector (interpreted as a row vector).
* @param M A matrix.
* @return A vector which is the result of multiplying @c v and @c M.
*/
template<typename T>
Vector<T> operator*(const Vector<T>& v, const Matrix<T>& M);
/**
* @brief Computes the division of a matrix by a scalar.
* @param M A matrix.
* @param c A scalar.
* @return A copy of @c M with all elements divided by @c c.
*/
template<typename T>
Matrix<T> operator/(const Matrix<T>& M, const T& c);
/**
* @brief Computes the matrix addition of two equally-sized matrices.
* @param M1 A matrix.
* @param M2 A matrix.
* @return The element-wise sum of @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator+(const Matrix<T>& M1, const Matrix<T>& M2);
/**
* @brief Computes the matrix difference of two equally-sized matrices.
* @param M1 A matrix.
* @param M2 A matrix.
* @return The element-wise difference between @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator-(const Matrix<T>& M1, const Matrix<T>& M2);
/**
* @brief Computes the element-wise negation of a matrix.
* @param M A matrix.
* @return The element-wise negation of @c M.
*/
template<typename T>
Matrix<T> operator-(const Matrix<T>& M);
/**
* @brief Prints the elements of a matrix to an output stream.
* @param stream An output stream.
* @param M A matrix.
* @return A reference to @c stream.
*/
template<typename T>
std::ostream& operator<<(std::ostream& stream, const Matrix<T>& M);
/**
* @brief Generates a random matrix with elements within a given range.
* @param rows The number of matrix rows.
* @param cols The number of matrix columns.
* @param lower_bound The lower bound for the sample interval.
* @param upper_bound The upper bound for the sample interval.
* @return A <tt>rows × cols</tt> matrix with elements sampled uniformly from
* <tt>[lower_bound, upper_bound]</tt>.
* @note This function was designed to work only with primitive integer and
* floating-point types (e.g. @c int, @c float, @c double etc.).
*/
template<typename T>
Matrix<T> random_matrix(typename Matrix<T>::size_type rows,
typename Matrix<T>::size_type cols,
const T& lower_bound = T{0},
const T& upper_bound = T{1});
/**
* @brief Generates an identity matrix.
* @param rows The number of matrix rows.
* @return A @c rows × @c rows identity matrix.
*/
template<typename T>
Matrix<T> identity_matrix(const typename Matrix<T>::size_type rows);
/*******************************************************************************
*
* FUNCTION DEFINITIONS
*
******************************************************************************/
template<typename T>
Matrix<T>::Matrix() : rows_{0}, cols_{0}
{
/* nothing needs to be done here */
}
template<typename T>
Matrix<T>::Matrix(std::initializer_list<std::initializer_list<T>> init)
: rows_(init.size()), cols_(init.size() > 0 ? init.begin()->size() : 0)
{
if (cols_ == 0)
{
clear();
return;
}
elements_.reserve(rows_ * cols_);
for (const auto& row : init)
{
FREEAML_ASSERT(row.size() == cols_);
for (const T& element : row)
{
elements_.push_back(element);
}
}
}
template<typename T>
Matrix<T>::Matrix(const size_type rows,
const size_type cols,
const T& x /* = T{} */)
: rows_(rows), cols_(cols), elements_(rows * cols, x)
{
if (rows_ == 0 || cols_ == 0)
{
clear();
}
}
template<typename T>
Matrix<T>::Matrix(const size_type rows,
const size_type cols,
Vector<T>&& elements)
: rows_(rows), cols_(cols), elements_(std::move(elements))
{
FREEAML_ASSERT(rows_ * cols_ == elements_.size());
if (rows_ == 0 || cols_ == 0)
{
clear();
}
}
template<typename T>
Matrix<T>::Matrix(const size_type rows,
const size_type cols,
const Vector<T>& elements)
: rows_(rows), cols_(cols), elements_(elements)
{
FREEAML_ASSERT(rows_ * cols_ == elements_.size());
if (rows_ == 0 || cols_ == 0)
{
clear();
}
}
template<typename T>
typename Matrix<T>::reference Matrix<T>::operator()(const size_type i,
const size_type j)
{
FREEAML_ASSERT(i < num_rows() && j < num_cols());
return elements_[i * num_cols() + j];
}
template<typename T>
typename Matrix<T>::const_reference Matrix<T>::operator()(
const size_type i, const size_type j) const
{
FREEAML_ASSERT(i < num_rows() && j < num_cols());
return elements_[i * num_cols() + j];
}
template<typename T>
bool Matrix<T>::operator==(const Matrix<T>& M) const
{
return rows_ == M.rows_ && cols_ == M.cols_ && elements_ == M.elements_;
}
template<typename T>
bool Matrix<T>::operator!=(const Matrix<T>& M) const
{
return !operator==(M);
}
template<typename T>
Matrix<T>& Matrix<T>::operator*=(const T& c)
{
elements_ *= c;
return *this;
}
template<typename T>
Matrix<T>& Matrix<T>::operator/=(const T& c)
{
elements_ /= c;
return *this;
}
template<typename T>
Matrix<T>& Matrix<T>::operator+=(const Matrix<T>& M)
{
FREEAML_ASSERT(num_rows() == M.num_rows());
FREEAML_ASSERT(num_cols() == M.num_cols());
elements_ += M.elements_;
return *this;
}
template<typename T>
Matrix<T>& Matrix<T>::operator-=(const Matrix<T>& M)
{
FREEAML_ASSERT(num_rows() == M.num_rows());
FREEAML_ASSERT(num_cols() == M.num_cols());
elements_ -= M.elements_;
return *this;
}
template<typename T>
Matrix<T> Matrix<T>::transpose() const
{
Matrix<T> result(num_cols(), num_rows(), T{});
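/* iteration i writes only column i of result, so the rows of *this can be
processed by independent threads without synchronization */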
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
for (size_type i = 0; i < num_rows(); ++i)
{
for (size_type j = 0; j < num_cols(); ++j)
{
result(j, i) = (*this)(i, j);
}
}
return result;
}
template<typename T>
T Matrix<T>::max_norm() const
{
return elements_.linf_norm();
}
template<typename T>
bool Matrix<T>::is_square() const
{
return num_rows() == num_cols();
}
template<typename T>
bool Matrix<T>::is_symmetric() const
{
if (!is_square())
{
return false;
}
for (size_type i = 0; i < num_rows(); ++i)
{
for (size_type j = i + 1; j < num_cols(); ++j)
{
if ((*this)(i, j) != (*this)(j, i))
{
return false;
}
}
}
return true;
}
template<typename T>
typename Matrix<T>::size_type Matrix<T>::num_rows() const
{
return rows_;
}
template<typename T>
typename Matrix<T>::size_type Matrix<T>::num_cols() const
{
return cols_;
}
template<typename T>
bool Matrix<T>::empty() const
{
return elements_.empty();
}
template<typename T>
void Matrix<T>::resize(const size_type rows,
const size_type cols,
const T& x /* = T{} */)
{
/* optimized case: if we are only changing the number of rows */
if (cols == num_cols())
{
elements_.resize(rows * cols, x);
rows_ = rows;
return;
}
if (rows == 0 || cols == 0)
{
clear();
return;
}
Vector<T> elements;
elements.reserve(rows * cols);
for (size_type i = 0; i < rows; ++i)
{
for (size_type j = 0; j < cols; ++j)
{
if (i < num_rows() && j < num_cols())
{
elements.push_back((*this)(i, j));
}
else
{
elements.push_back(x);
}
}
}
rows_ = rows;
cols_ = cols;
elements_ = std::move(elements);
}
template<typename T>
void Matrix<T>::clear()
{
rows_ = 0;
cols_ = 0;
elements_.clear();
}
template<typename T>
const Vector<T>& Matrix<T>::flatten() const
{
return elements_;
}
template<typename T>
Matrix<T> operator*(const Matrix<T>& M, const T& c)
{
Matrix<T> result = M;
result *= c;
return result;
}
template<typename T>
Matrix<T> operator*(const T& c, const Matrix<T>& M)
{
return M * c;
}
template<typename T>
Matrix<T> operator*(const Matrix<T>& M1, const Matrix<T>& M2)
{
FREEAML_ASSERT(M1.num_cols() == M2.num_rows());
using size_type = typename Matrix<T>::size_type;
Matrix<T> result(M1.num_rows(), M2.num_cols(), T{});
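/* i-k-j loop order: the innermost loop walks M2 and result row-wise
(cache-friendly), and each thread of the parallel loop writes only its own
rows of result, so no synchronization is needed */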
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
for (size_type i = 0; i < M1.num_rows(); ++i)
{
for (size_type k = 0; k < M1.num_cols(); ++k)
{
for (size_type j = 0; j < M2.num_cols(); ++j)
{
result(i, j) += M1(i, k) * M2(k, j);
}
}
}
return result;
}
template<typename T>
Vector<T> operator*(const Matrix<T>& M, const Vector<T>& v)
{
FREEAML_ASSERT(M.num_cols() == v.size());
using size_type = typename Matrix<T>::size_type;
Vector<T> result(M.num_rows(), T{});
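/* each parallel iteration accumulates into its own entry result[i] */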
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
for (size_type i = 0; i < M.num_rows(); ++i)
{
for (size_type j = 0; j < M.num_cols(); ++j)
{
result[i] += M(i, j) * v[j];
}
}
return result;
}
template<typename T>
Vector<T> operator*(const Vector<T>& v, const Matrix<T>& M)
{
FREEAML_ASSERT(v.size() == M.num_rows());
using size_type = typename Matrix<T>::size_type;
Vector<T> result(M.num_cols(), T{});
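/* parallelizing over columns keeps each result[j] owned by a single thread */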
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
for (size_type j = 0; j < M.num_cols(); ++j)
{
for (size_type i = 0; i < v.size(); ++i)
{
result[j] += v[i] * M(i, j);
}
}
return result;
}
template<typename T>
Matrix<T> operator/(const Matrix<T>& M, const T& c)
{
Matrix<T> result = M;
result /= c;
return result;
}
template<typename T>
Matrix<T> operator+(const Matrix<T>& M1, const Matrix<T>& M2)
{
Matrix<T> result = M1;
result += M2;
return result;
}
template<typename T>
Matrix<T> operator-(const Matrix<T>& M1, const Matrix<T>& M2)
{
Matrix<T> result = M1;
result -= M2;
return result;
}
template<typename T>
Matrix<T> operator-(const Matrix<T>& M)
{
return Matrix<T>(M.num_rows(), M.num_cols(), -M.flatten());
}
template<typename T>
std::ostream& operator<<(std::ostream& stream, const Matrix<T>& M)
{
using size_type = typename Matrix<T>::size_type;
for (size_type i = 0; i < M.num_rows(); ++i)
{
stream << (i == 0 ? "[[" : " [");
for (size_type j = 0; j < M.num_cols(); ++j)
{
stream << M(i, j) << (j + 1 == M.num_cols() ? "" : ", ");
}
stream << (i + 1 == M.num_rows() ? "]]" : "],");
}
return stream;
}
template<typename T>
Matrix<T> random_matrix(const typename Matrix<T>::size_type rows,
const typename Matrix<T>::size_type cols,
const T& lower_bound /* = T{0} */,
const T& upper_bound /* = T{1} */)
{
return Matrix<T>(rows, cols,
random_vector<T>(rows * cols, lower_bound, upper_bound));
}
template<typename T>
Matrix<T> identity_matrix(const typename Matrix<T>::size_type rows)
{
using size_type = typename Matrix<T>::size_type;
Matrix<T> I(rows, rows);
for (size_type i = 0; i < rows; ++i)
{
I(i, i) = T{1};
}
return I;
}
} /* namespace freeaml */
|
sum.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// read timer in seconds
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
//Fill the vector with random numbers in [0, 10)
void init(float *X) {
for (int i = 0; i<N; i++) {
X[i] = (float)rand()/(float)(RAND_MAX/10.0);
}
}
//Our sum function: accumulate all elements of X (SIMD version)
float sum(float *X) {
float result = 0;
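// result carries a loop dependence across iterations, so the simd
// construct must declare it as a reduction; without the clause the
// vectorized accumulation is non-conforming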
#pragma omp simd reduction(+:result)
for (int i = 0; i<N; i++) {
result += X[i];
}
return result;
}
// Debug functions
float sum_serial(float *X) {
float result = 0;
for (int i = 0; i<N; i++) {
result += X[i];
}
return result;
}
void print_vector(float *vector) {
printf("[");
for (int i = 0; i<8; i++) {
printf("%.2f ", vector[i]);
}
puts("]");
}
int main(int argc, char **argv) {
//Set everything up
float *X = malloc(sizeof(float)*N);
float result, result_serial;
srand(time(NULL));
init(X);
double start = read_timer();
for (int i = 0; i<N_RUNS; i++)
result = sum(X);
double t = (read_timer() - start);
double start_serial = read_timer();
for (int i = 0; i<N_RUNS; i++)
result_serial = sum_serial(X);
double t_serial = (read_timer() - start_serial);
print_vector(X);
puts("=\n");
printf("SIMD: %f\n", result);
puts("---------------------------------");
printf("Serial: %f\n", result_serial);
double gflops = ((double) N * N_RUNS) / (1.0e9 * t); // ~N adds per run
double gflops_serial = ((double) N * N_RUNS) / (1.0e9 * t_serial);
printf("==================================================================\n");
printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
printf("------------------------------------------------------------------\n");
printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
printf("Correctness check: %f\n", result_serial - result);
free(X);
return 0;
}
|
FEMTree.h | /*
Copyright (c) 2006, Michael Kazhdan and Matthew Bolitho
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the Johns Hopkins University nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
// -- [TODO] Make as many of the functions (related to the solver) const as possible.
// -- [TODO] Move the point interpolation constraint scaling by 1<<maxDepth
// -- [TODO] Add support for staggered-grid test functions
// -- [TODO] Store signatures with constraints/systems/restriction-prolongations
// -- [TODO] Make a virtual evaluation that only needs to know the degree
// -- [TODO] Modify (public) functions so that template parameters don't need to be passed when they are called
// -- [TODO] Confirm that whenever _isValidFEM*Node is called, the flags have already been set.
// -- [TODO] Make weight evaluation more efficient in _getSamplesPerNode by reducing the number of calls to getNeighbors
// -- [TODO] For point evaluation:
// 1. Have the evaluator store stencils for all depths [DONE]
// 2. When testing centers/corners, don't use generic evaluation
#ifndef FEM_TREE_INCLUDED
#define FEM_TREE_INCLUDED
#define VERSION "10.05"
#define MEMORY_ALLOCATOR_BLOCK_SIZE (1<<12)
#define NEW_CODE
#include <atomic>
#include "MyMiscellany.h"
#include "BSplineData.h"
#include "Geometry.h"
#include "PointStream.h"
#include "RegularTree.h"
#include "SparseMatrix.h"
#include <functional>
template< unsigned int Dim , class Real > class FEMTree;
enum
{
SHOW_GLOBAL_RESIDUAL_NONE ,
SHOW_GLOBAL_RESIDUAL_LAST ,
SHOW_GLOBAL_RESIDUAL_ALL ,
SHOW_GLOBAL_RESIDUAL_COUNT
};
const char* ShowGlobalResidualNames[] = { "show none" , "show last" , "show all" };
class FEMTreeNodeData
{
public:
enum
{
SPACE_FLAG = 1 ,
FEM_FLAG_1 = 2 ,
FEM_FLAG_2 = 4 ,
REFINABLE_FLAG = 8 ,
GHOST_FLAG = 1<<7
};
int nodeIndex;
mutable char flags;
void setGhostFlag( bool f ) const { if( f ) flags |= GHOST_FLAG ; else flags &= ~GHOST_FLAG; }
bool getGhostFlag( void ) const { return ( flags & GHOST_FLAG )!=0; }
FEMTreeNodeData( void );
~FEMTreeNodeData( void );
};
template< unsigned int Dim >
class SortedTreeNodes
{
typedef RegularTreeNode< Dim , FEMTreeNodeData > TreeNode;
protected:
Pointer( Pointer( int ) ) _sliceStart;
int _levels;
public:
Pointer( TreeNode* ) treeNodes;
int begin( int depth ) const { return _sliceStart[depth][0]; }
int end( int depth ) const { return _sliceStart[depth][(size_t)1<<depth]; }
int begin( int depth , int slice ) const { return _sliceStart[depth][ slice<0 ? 0 : ( slice>(1<<depth) ? (1<<depth) : slice ) ]; }
int end( int depth , int slice ) const { return begin( depth , slice+1 ); }
int size( void ) const { return _sliceStart[_levels-1][(size_t)1<<(_levels-1)]; }
int size( int depth ) const { if( depth<0 || depth>=_levels ) WARN( "depth out of bounds: %d" , depth ); return _sliceStart[depth][(size_t)1<<depth] - _sliceStart[depth][0]; }
int size( int depth , int slice ) const { return end( depth , slice ) - begin( depth , slice ); }
int levels( void ) const { return _levels; }
SortedTreeNodes( void );
~SortedTreeNodes( void );
void set( TreeNode& root , std::vector< int >* map );
size_t set( TreeNode& root );
};
template< typename T > struct DotFunctor{};
template< > struct DotFunctor< float >
{
double operator()( float v1 , float v2 ){ return v1*v2; }
unsigned int dimension( void ) const { return 1; }
};
template< > struct DotFunctor< double >
{
double operator()( double v1 , double v2 ){ return v1*v2; }
unsigned int dimension( void ) const { return 1; }
};
template< class Real , unsigned int Dim > struct DotFunctor< Point< Real , Dim > >
{
double operator()( Point< Real , Dim > v1 , Point< Real , Dim > v2 ){ return Point< Real , Dim >::Dot( v1 , v2 ); }
unsigned int dimension( void ) const { return Dim; }
};
template< typename Pack > struct SupportKey{ };
template< unsigned int ... Degrees >
struct SupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart) ... > , UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > >
{
typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > LeftRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... > RightRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportSize ) ... > Sizes;
};
template< typename Pack > struct ConstSupportKey{ };
template< unsigned int ... Degrees >
struct ConstSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > , UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > >
{
typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > LeftRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... > RightRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportSize ) ... > Sizes;
};
template< typename Pack > struct OverlapKey{ };
template< unsigned int ... Degrees >
struct OverlapKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > , UIntPack< BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ... > >
{
typedef UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > LeftRadii;
typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ) ... > RightRadii;
typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapSize ) ... > Sizes;
};
template< typename Pack > struct ConstOverlapKey{ };
template< unsigned int ... Degrees >
struct ConstOverlapKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > , UIntPack< BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ... > >
{
typedef UIntPack< (-BSplineOverlapSizes< Degrees , Degrees >::OverlapStart ) ... > LeftRadii;
typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapEnd ) ... > RightRadii;
typedef UIntPack< ( BSplineOverlapSizes< Degrees , Degrees >::OverlapSize ) ... > Sizes;
};
template< typename Pack > struct PointSupportKey{ };
template< unsigned int ... Degrees >
struct PointSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > , UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > >
{
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... > LeftRadii;
typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > RightRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd - BSplineSupportSizes< Degrees >::SupportStart + 1 ) ... > Sizes;
};
template< typename Pack > struct ConstPointSupportKey{ };
template< unsigned int ... Degrees >
struct ConstPointSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< BSplineSupportSizes< Degrees >::SupportEnd ... > , UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > >
{
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd ) ... > LeftRadii;
typedef UIntPack< (-BSplineSupportSizes< Degrees >::SupportStart ) ... > RightRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::SupportEnd - BSplineSupportSizes< Degrees >::SupportStart + 1 ) ... > Sizes;
};
template< typename Pack > struct CornerSupportKey{ };
template< unsigned int ... Degrees >
struct CornerSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template NeighborKey< UIntPack< BSplineSupportSizes< Degrees >::BCornerEnd ... > , UIntPack< ( -BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... > >
{
typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerEnd ) ... > LeftRadii;
typedef UIntPack< (-BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... > RightRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerSize + 1 ) ... > Sizes;
};
template< typename Pack > struct ConstCornerSupportKey{ };
template< unsigned int ... Degrees >
struct ConstCornerSupportKey< UIntPack< Degrees ... > > : public RegularTreeNode< sizeof...(Degrees) , FEMTreeNodeData >::template ConstNeighborKey< UIntPack< BSplineSupportSizes< Degrees >::BCornerEnd ... > , UIntPack< ( -BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... > >
{
typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerEnd ) ... > LeftRadii;
typedef UIntPack< (-BSplineSupportSizes< Degrees >::BCornerStart + 1 ) ... > RightRadii;
typedef UIntPack< ( BSplineSupportSizes< Degrees >::BCornerSize + 1 ) ... > Sizes;
};
// This represents a vector that can only grow in size.
// It has the property that once a reference to an element is returned, that reference remains valid until the vector is destroyed.
template< typename T , unsigned int LogBlockSize=10 , unsigned InitialBlocks=10 , unsigned int AllocationMultiplier=2 >
struct BlockedVector
{
BlockedVector( T defaultValue=T() ) : _defaultValue( defaultValue )
{
_reservedBlocks = InitialBlocks;
_blocks = NewPointer< Pointer( T ) >( _reservedBlocks );
for( size_t i=0 ; i<_reservedBlocks ; i++ ) _blocks[i] = NullPointer( Pointer( T ) );
_allocatedBlocks = _size = 0;
}
~BlockedVector( void )
{
for( size_t i=0 ; i<_allocatedBlocks ; i++ ) DeletePointer( _blocks[i] );
DeletePointer( _blocks );
}
BlockedVector( const BlockedVector& v )
{
_reservedBlocks = v._reservedBlocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue;
_blocks = NewPointer< Pointer( T ) >( _reservedBlocks );
for( size_t i=0 ; i<_allocatedBlocks ; i++ )
{
_blocks[i] = NewPointer< T >( _BlockSize );
memcpy( _blocks[i] , v._blocks[i] , sizeof(T)*_BlockSize );
}
for( size_t i=_allocatedBlocks ; i<_reservedBlocks ; i++ ) _blocks[i] = NullPointer( Pointer ( T ) );
}
BlockedVector& operator = ( const BlockedVector& v )
{
if( this==&v ) return *this; // guard against self-assignment
for( size_t i=0 ; i<_allocatedBlocks ; i++ ) DeletePointer( _blocks[i] );
DeletePointer( _blocks );
_reservedBlocks = v._reservedBlocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue;
_blocks = NewPointer< Pointer( T ) >( _reservedBlocks );
for( size_t i=0 ; i<_allocatedBlocks ; i++ )
{
_blocks[i] = NewPointer< T >( _BlockSize );
memcpy( _blocks[i] , v._blocks[i] , sizeof(T)*_BlockSize );
}
for( size_t i=_allocatedBlocks ; i<_reservedBlocks ; i++ ) _blocks[i] = NullPointer( Pointer ( T ) );
return *this;
}
BlockedVector( BlockedVector&& v )
{
_reservedBlocks = v._reservedBlocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue , _blocks = v._blocks;
v._reservedBlocks = v._allocatedBlocks = v._size = 0 , v._blocks = NullPointer( Pointer( T ) );
}
BlockedVector& operator = ( BlockedVector&& v )
{
for( size_t i=0 ; i<_allocatedBlocks ; i++ ) DeletePointer( _blocks[i] );
DeletePointer( _blocks );
_reservedBlocks = v._reservedBlocks , _allocatedBlocks = v._allocatedBlocks , _size = v._size , _defaultValue = v._defaultValue , _blocks = v._blocks;
v._reservedBlocks = v._allocatedBlocks = v._size = 0 , v._blocks = NullPointer( Pointer( T ) );
return *this;
}
size_t size( void ) const { return _size; }
const T& operator[]( size_t idx ) const { return _blocks[idx>>LogBlockSize][idx&_Mask]; }
T& operator[]( size_t idx ){ return _blocks[idx>>LogBlockSize][idx&_Mask]; }
size_t resize( size_t size ){ return resize( size , _defaultValue ); }
size_t resize( size_t size , const T& defaultValue )
{
if( size<=_size )
{
#ifdef _MSC_VER
WARN( "BlockedVector::resize: new size must be greater than old size: %llu > %llu" , size , _size );
#else // !_MSC_VER
WARN( "BlockedVector::resize: new size must be greater than old size: %lu > %lu" , size , _size );
#endif // _MSC_VER
return _size;
}
size_t index = size-1;
size_t block = index >> LogBlockSize;
size_t blockIndex = index & _Mask;
// If there are insufficiently many blocks
if( block>=_reservedBlocks )
{
size_t newReservedSize = std::max< size_t >( _reservedBlocks * AllocationMultiplier , block+1 );
Pointer( Pointer( T ) ) __blocks = NewPointer< Pointer( T ) >( newReservedSize );
memcpy( __blocks , _blocks , sizeof( Pointer( T ) ) * _reservedBlocks );
for( size_t i=_reservedBlocks ; i<newReservedSize ; i++ ) __blocks[i] = NullPointer( Pointer( T ) );
Pointer( Pointer( T ) ) _oldBlocks = _blocks;
_blocks = __blocks;
_reservedBlocks = newReservedSize;
DeletePointer( _oldBlocks );
}
// If the block hasn't been allocated
if( block>=_allocatedBlocks )
{
for( size_t b=_allocatedBlocks ; b<=block ; b++ )
{
_blocks[b] = NewPointer< T >( _BlockSize );
for( size_t i=0 ; i<_BlockSize ; i++ ) _blocks[b][i] = defaultValue;
}
_allocatedBlocks = block+1;
}
_size = index+1;
return index;
}
size_t push( void ){ return resize( _size+1 ); }
protected:
static const size_t _BlockSize = 1<<LogBlockSize;
static const size_t _Mask = (1<<LogBlockSize)-1;
T _defaultValue;
size_t _allocatedBlocks , _reservedBlocks;
size_t _size;
Pointer( Pointer( T ) ) _blocks;
};
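// Illustrative usage sketch (editor's addition, not part of the original
// code); unlike std::vector, growth never invalidates references:
//   BlockedVector< int > v;
//   size_t i = v.push();   // v.size()==1 , i==0
//   int& r = v[i];         // reference into the first block
//   v.resize( 1<<20 );     // allocates more blocks; r remains valid
//   r = 42;                // writes v[i]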
template< class Data , typename Pack > struct _SparseOrDenseNodeData{};
template< class Data , unsigned int ... FEMSigs >
struct _SparseOrDenseNodeData< Data , UIntPack< FEMSigs ... > >
{
static const unsigned int Dim = sizeof ... ( FEMSigs );
typedef UIntPack< FEMSigs ... > FEMSignatures;
typedef Data data_type;
virtual size_t size( void ) const = 0;
virtual const Data& operator[] ( int idx ) const = 0;
virtual Data& operator[] ( int idx ) = 0;
virtual Data& operator[]( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) = 0;
virtual Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) = 0;
virtual const Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const = 0;
};
template< class Data , typename Pack > struct SparseNodeData{};
template< class Data , unsigned int ... FEMSigs >
struct SparseNodeData< Data , UIntPack< FEMSigs ... > > : public _SparseOrDenseNodeData< Data , UIntPack< FEMSigs ... > >
{
static const unsigned int Dim = sizeof ... ( FEMSigs );
size_t size( void ) const { return _data.size(); }
const Data& operator[] ( int idx ) const { return _data[idx]; }
Data& operator[] ( int idx ) { return _data[idx]; }
void reserve( size_t sz ){ if( sz>_indices.size() ) _indices.resize( sz , -1 ); }
Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ){ return ( node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)_indices.size() || _indices[ node->nodeData.nodeIndex ]<0 ) ? NULL : &_data[ _indices[ node->nodeData.nodeIndex ] ]; }
const Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const { return ( node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)_indices.size() || _indices[ node->nodeData.nodeIndex ]<0 ) ? NULL : &_data[ _indices[ node->nodeData.nodeIndex ] ]; }
Data& operator[]( const RegularTreeNode< Dim , FEMTreeNodeData >* node )
{
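// Double-checked pattern: the cheap size/index test runs unsynchronized and
// is repeated inside the named critical section before any state is mutated,
// so concurrent callers cannot resize or allocate twice.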
// If the node hasn't been indexed yet
if( node->nodeData.nodeIndex>=(int)_indices.size() )
#pragma omp critical( SparseNodeData__operator )
if( node->nodeData.nodeIndex>=(int)_indices.size() ) _indices.resize( node->nodeData.nodeIndex+1 , -1 );
// If the node hasn't been allocated yet
if( _indices[ node->nodeData.nodeIndex ]==-1 )
#pragma omp critical( SparseNodeData__operator )
if( _indices[ node->nodeData.nodeIndex ]==-1 ) _indices[ node->nodeData.nodeIndex ] = (int)_data.push();
return _data[ _indices[ node->nodeData.nodeIndex ] ];
}
int index( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const
{
if( !node || node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)_indices.size() ) return -1;
else return _indices[ node->nodeData.nodeIndex ];
}
protected:
template< unsigned int _Dim , class _Real > friend class FEMTree;
// Map should be the size of the old number of entries and map[i] should give the new index of the old i-th node
void _remapIndices( const int* newNodeIndices , unsigned int newNodeCount )
{
BlockedVector< int > newIndices;
newIndices.resize( newNodeCount );
for( int i=0 ; i<(int)newNodeCount ; i++ ) newIndices[i] = -1;
for( size_t i=0 ; i<_indices.size() ; i++ ) if( newNodeIndices[i]>=0 && newNodeIndices[i]<(int)newNodeCount ) newIndices[ newNodeIndices[i] ] = _indices[i];
_indices = newIndices;
}
BlockedVector< int > _indices;
BlockedVector< Data > _data;
};
template< class Data , typename Pack > struct DenseNodeData{};
template< class Data , unsigned int ... FEMSigs >
struct DenseNodeData< Data , UIntPack< FEMSigs ... > > : public _SparseOrDenseNodeData< Data , UIntPack< FEMSigs ... > >
{
static const unsigned int Dim = sizeof ... ( FEMSigs );
DenseNodeData( void ) { _data = NullPointer( Data ) ; _sz = 0; }
DenseNodeData( size_t sz ){ _sz = sz ; if( sz ) _data = NewPointer< Data >( sz ) ; else _data = NullPointer( Data ); }
DenseNodeData( const DenseNodeData& d ) : DenseNodeData() { _resize( d._sz ) ; if( _sz ) memcpy( _data , d._data , sizeof(Data) * _sz ); }
DenseNodeData( DenseNodeData&& d ){ _data = d._data , _sz = d._sz ; d._data = NullPointer( Data ) , d._sz = 0; }
DenseNodeData& operator = ( const DenseNodeData& d ){ _resize( d._sz ) ; if( _sz ) memcpy( _data , d._data , sizeof(Data) * _sz ) ; return *this; }
DenseNodeData& operator = ( DenseNodeData&& d ){ size_t __sz = _sz ; Pointer( Data ) __data = _data ; _data = d._data , _sz = d._sz ; d._data = __data , d._sz = __sz ; return *this; }
~DenseNodeData( void ){ DeletePointer( _data ) ; _sz = 0; }
static void WriteSignatures( FILE* fp )
{
unsigned int dim = sizeof ... ( FEMSigs );
fwrite( &dim , sizeof(unsigned int) , 1 , fp );
unsigned int femSigs[] = { FEMSigs ... };
fwrite( femSigs , sizeof(unsigned int) , dim , fp );
}
void write( FILE* fp ) const { fwrite( &_sz , sizeof(size_t) , 1 , fp ) ; fwrite( _data , sizeof(Data) , _sz , fp ); }
void read( FILE* fp )
{
if( fread( &_sz , sizeof(size_t) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read size" );
_data = NewPointer< Data >( _sz );
if( fread ( _data , sizeof(Data) , _sz , fp )!=_sz ) ERROR_OUT( "Failed to read data" );
}
Data& operator[] ( int idx ) { return _data[idx]; }
const Data& operator[] ( int idx ) const { return _data[idx]; }
size_t size( void ) const { return _sz; }
Data& operator[]( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) { return _data[ node->nodeData.nodeIndex ]; }
Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) { return ( node==NULL || node->nodeData.nodeIndex>=(int)_sz ) ? NULL : &_data[ node->nodeData.nodeIndex ]; }
const Data* operator()( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const { return ( node==NULL || node->nodeData.nodeIndex>=(int)_sz ) ? NULL : &_data[ node->nodeData.nodeIndex ]; }
int index( const RegularTreeNode< Dim , FEMTreeNodeData >* node ) const { return ( !node || node->nodeData.nodeIndex<0 || node->nodeData.nodeIndex>=(int)this->_sz ) ? -1 : node->nodeData.nodeIndex; }
Pointer( Data ) operator()( void ) { return _data; }
ConstPointer( Data ) operator()( void ) const { return ( ConstPointer( Data ) )_data; }
protected:
template< unsigned int _Dim , class _Real > friend class FEMTree;
// Map should be the size of the old number of entries and map[i] should give the new index of the old i-th node
void _remapIndices( const int* newNodeIndices , size_t newNodeCount )
{
Pointer( Data ) newData = NewPointer< Data >( newNodeCount );
memset( newData , 0 , sizeof(Data)*newNodeCount );
for( size_t i=0 ; i<_sz ; i++ ) if( newNodeIndices[i]>=0 && newNodeIndices[i]<newNodeCount ) newData[ newNodeIndices[i] ] = _data[i];
DeletePointer( _data );
_data = newData;
_sz = newNodeCount;
}
size_t _sz;
void _resize( size_t sz ){ DeletePointer( _data ) ; if( sz ) _data = NewPointer< Data >( sz ) ; else _data = NullPointer( Data ) ; _sz = sz; }
Pointer( Data ) _data;
};
enum FEMTreeRealType
{
FEM_TREE_REAL_FLOAT ,
FEM_TREE_REAL_DOUBLE ,
FEM_TREE_REAL_COUNT
};
const char* FEMTreeRealNames[] = { "float" , "double" };
void ReadFEMTreeParameter( FILE* fp , FEMTreeRealType& realType , int &dimension )
{
if( fread( &realType , sizeof(FEMTreeRealType) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read real type" );
if( fread( &dimension , sizeof(int) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read dimension" );
}
unsigned int* ReadDenseNodeDataSignatures( FILE* fp , unsigned int &dim )
{
if( fread( &dim , sizeof(unsigned int) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read dimension" );
unsigned int* femSigs = new unsigned int[dim];
if( fread( femSigs , sizeof(unsigned int) , dim , fp )!=dim ) ERROR_OUT( "Failed to read signatures" );
return femSigs;
}
// The Derivative method needs static members:
// Dim: the dimensionality of the space in which derivatives are evaluated
// Size: the total number of derivatives
// and static methods:
// Index: takes the number of partials along each dimension and returns the index
// Factor: takes an index and sets the number of partials along each dimension
template< typename T > struct TensorDerivatives{ };
template< class Real , typename T > struct TensorDerivativeValues{ };
// Specify the derivatives for each dimension separately
template< unsigned int D , unsigned int ... Ds >
struct TensorDerivatives< UIntPack< D , Ds ... > >
{
typedef TensorDerivatives< UIntPack< Ds ... > > _TensorDerivatives;
static const int LastDerivative = UIntPack< D , Ds ... >::template Get< sizeof ... (Ds) >();
static const int Dim = _TensorDerivatives::Dim + 1;
static const unsigned int Size = _TensorDerivatives::Size * ( D+1 );
static void Factor( unsigned int idx , unsigned int derivatives[Dim] ){ derivatives[0] = idx / _TensorDerivatives::Size ; _TensorDerivatives::Factor( idx % _TensorDerivatives::Size , derivatives+1 ); }
static unsigned int Index( const unsigned int derivatives[Dim] ){ return _TensorDerivatives::Index( derivatives + 1 ) + _TensorDerivatives::Size * derivatives[0]; }
};
template< unsigned int D >
struct TensorDerivatives< UIntPack< D > >
{
static const int LastDerivative = D;
static const int Dim = 1;
static const unsigned int Size = D+1;
static void Factor( unsigned int idx , unsigned int derivatives[1] ){ derivatives[0] = idx; }
static unsigned int Index( const unsigned int derivatives[1] ){ return derivatives[0]; }
};
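// Worked example (editor's note): for TensorDerivatives< UIntPack< 1 , 1 > >,
// Size = (1+1)*(1+1) = 4 and Index( {a,b} ) = 2*a + b, i.e. the per-dimension
// derivative counts are laid out in row-major order:
// (0,0) -> 0 , (0,1) -> 1 , (1,0) -> 2 , (1,1) -> 3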
template< class Real , unsigned int ... Ds > struct TensorDerivativeValues< Real , UIntPack< Ds ... > > : public Point< Real , TensorDerivatives< UIntPack< Ds ... > >::Size >{ };
// Specify the sum of the derivatives
template< unsigned int Dim , unsigned int D >
struct CumulativeDerivatives
{
typedef CumulativeDerivatives< Dim , D-1 > _CumulativeDerivatives;
static const int LastDerivative = D;
static const unsigned int Size = _CumulativeDerivatives::Size * Dim + 1;
static void Factor( unsigned int idx , unsigned int d[Dim] )
{
if( idx<_CumulativeDerivatives::Size ) return _CumulativeDerivatives::Factor( idx , d );
else _Factor( idx - _CumulativeDerivatives::Size , d );
}
static unsigned int Index( const unsigned int derivatives[Dim] )
{
int dCount = 0;
for( int d=0 ; d<Dim ; d++ ) dCount += derivatives[d];
if( dCount>D ) ERROR_OUT( "More derivatives than allowed" );
else if( dCount<D ) return _CumulativeDerivatives::Index( derivatives );
else return _CumulativeDerivatives::Size + _Index( derivatives );
}
protected:
static const unsigned int _Size = _CumulativeDerivatives::_Size * Dim;
static void _Factor( unsigned int idx , unsigned int d[Dim] )
{
_CumulativeDerivatives::_Factor( idx % _CumulativeDerivatives::_Size , d );
d[ idx / _CumulativeDerivatives::_Size ]++;
}
static unsigned int _Index( const unsigned int d[Dim] )
{
unsigned int _d[Dim];
memcpy( _d , d , sizeof(_d) );
for( int i=0 ; i<Dim ; i++ ) if( _d[i] )
{
_d[i]--;
return _CumulativeDerivatives::Index( _d ) * Dim + i;
}
ERROR_OUT( "No derivatives specified" );
return -1;
}
friend CumulativeDerivatives< Dim , D+1 >;
};
template< unsigned int Dim >
struct CumulativeDerivatives< Dim , 0 >
{
static const int LastDerivative = 0;
static const unsigned int Size = 1;
static void Factor( unsigned int idx , unsigned int d[Dim] ){ memset( d , 0 , sizeof(unsigned int)*Dim ); }
static unsigned int Index( const unsigned int derivatives[Dim] ){ return 0; }
protected:
static const unsigned int _Size = 1;
static void _Factor( unsigned int idx , unsigned int d[Dim] ){ memset( d , 0 , sizeof(unsigned int)*Dim ); }
friend CumulativeDerivatives< Dim , 1 >;
};
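// Worked example (editor's note): for Dim=2 , D=1 the cumulative layout has
// Size = 1*2+1 = 3 entries, grouped by total derivative order: the value
// (0,0) -> 0 , then the first-order partials (1,0) -> 1 and (0,1) -> 2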
template< typename Real , unsigned int Dim , unsigned int D > using CumulativeDerivativeValues = Point< Real , CumulativeDerivatives< Dim , D >::Size >;
template< unsigned int Dim , class Real , unsigned int D >
CumulativeDerivativeValues< Real , Dim , D > Evaluate( const double dValues[Dim][D+1] )
{
CumulativeDerivativeValues< Real , Dim , D > v;
unsigned int _d[Dim];
for( int d=0 ; d<CumulativeDerivatives< Dim , D >::Size ; d++ )
{
CumulativeDerivatives< Dim , D >::Factor( d , _d );
double value = dValues[0][ _d[0] ];
for( int dd=1 ; dd<Dim ; dd++ ) value *= dValues[dd][ _d[dd] ];
v[d] = (Real)value;
}
return v;
}
template< unsigned int Dim , class Real , typename T , unsigned int D >
struct DualPointInfo
{
Point< Real , Dim > position;
Real weight;
CumulativeDerivativeValues< T , Dim , D > dualValues;
DualPointInfo operator + ( const DualPointInfo& p ) const { return DualPointInfo( position + p.position , dualValues + p.dualValues , weight + p.weight ); }
DualPointInfo& operator += ( const DualPointInfo& p ){ position += p.position ; weight += p.weight , dualValues += p.dualValues ; return *this; }
DualPointInfo operator * ( Real s ) const { return DualPointInfo( position*s , dualValues*s , weight*s ); }
DualPointInfo& operator *= ( Real s ){ position *= s , weight *= s , dualValues *= s ; return *this; }
DualPointInfo operator / ( Real s ) const { return DualPointInfo( position/s , dualValues/s , weight/s ); }
DualPointInfo& operator /= ( Real s ){ position /= s , weight /= s , dualValues /= s ; return *this; }
DualPointInfo( void ) : weight(0) { }
DualPointInfo( Point< Real , Dim > p , CumulativeDerivativeValues< T , Dim , D > c , Real w ) { position = p , dualValues = c , weight = w; }
};
template< unsigned int Dim , class Real , typename Data , typename T , unsigned int D >
struct DualPointAndDataInfo
{
DualPointInfo< Dim , Real , T , D > pointInfo;
Data data;
DualPointAndDataInfo operator + ( const DualPointAndDataInfo& p ) const { return DualPointAndDataInfo( pointInfo + p.pointInfo , data + p.data ); }
DualPointAndDataInfo operator * ( Real s ) const { return DualPointAndDataInfo( pointInfo * s , data * s ); }
DualPointAndDataInfo operator / ( Real s ) const { return DualPointAndDataInfo( pointInfo / s , data / s ); }
DualPointAndDataInfo& operator += ( const DualPointAndDataInfo& p ){ pointInfo += p.pointInfo ; data += p.data ; return *this; }
DualPointAndDataInfo& operator *= ( Real s ) { pointInfo *= s , data *= s ; return *this; }
DualPointAndDataInfo& operator /= ( Real s ) { pointInfo /= s , data /= s ; return *this; }
DualPointAndDataInfo( void ){ }
DualPointAndDataInfo( DualPointInfo< Dim , Real , T , D > p , Data d ) { pointInfo = p , data = d; }
};
template< unsigned int Dim , class Real , typename T , unsigned int D >
struct DualPointInfoBrood
{
DualPointInfo< Dim , Real , T , D >& operator[]( size_t idx ){ return _dpInfo[idx]; }
const DualPointInfo< Dim , Real , T , D >& operator[]( size_t idx ) const { return _dpInfo[idx]; }
void finalize( void ){ _size = 0 ; for( int i=0 ; i<(1<<Dim) ; i++ ) if( _dpInfo[i].weight>0 ) _dpInfo[_size++] = _dpInfo[i]; }
unsigned int size( void ) const { return _size; }
DualPointInfoBrood operator + ( const DualPointInfoBrood& p ) const { DualPointInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] + p._dpInfo[i] ; return d; }
DualPointInfoBrood operator * ( Real s ) const { DualPointInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] * s ; return d; }
DualPointInfoBrood operator / ( Real s ) const { DualPointInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] / s ; return d; }
DualPointInfoBrood& operator += ( const DualPointInfoBrood& p ){ for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] += p._dpInfo[i] ; return *this; }
DualPointInfoBrood& operator *= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] *= s ; return *this; }
DualPointInfoBrood& operator /= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] /= s ; return *this; }
protected:
DualPointInfo< Dim , Real , T , D > _dpInfo[1<<Dim];
unsigned int _size;
};
template< unsigned int Dim , class Real , typename Data , typename T , unsigned int D >
struct DualPointAndDataInfoBrood
{
DualPointAndDataInfo< Dim , Real , Data , T , D >& operator[]( size_t idx ){ return _dpInfo[idx]; }
const DualPointAndDataInfo< Dim , Real , Data , T , D >& operator[]( size_t idx ) const { return _dpInfo[idx]; }
void finalize( void ){ _size = 0 ; for( int i=0 ; i<(1<<Dim) ; i++ ) if( _dpInfo[i].pointInfo.weight>0 ) _dpInfo[_size++] = _dpInfo[i]; }
unsigned int size( void ) const { return _size; }
DualPointAndDataInfoBrood operator + ( const DualPointAndDataInfoBrood& p ) const { DualPointAndDataInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] + p._dpInfo[i] ; return d; }
DualPointAndDataInfoBrood operator * ( Real s ) const { DualPointAndDataInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] * s ; return d; }
DualPointAndDataInfoBrood operator / ( Real s ) const { DualPointAndDataInfoBrood d ; for( int i=0 ; i<(1<<Dim) ; i++ ) d._dpInfo[i] = _dpInfo[i] / s ; return d; }
DualPointAndDataInfoBrood& operator += ( const DualPointAndDataInfoBrood& p ){ for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] += p._dpInfo[i] ; return *this; }
DualPointAndDataInfoBrood& operator *= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] *= s ; return *this; }
DualPointAndDataInfoBrood& operator /= ( Real s ) { for( int i=0 ; i<(1<<Dim) ; i++ ) _dpInfo[i] /= s ; return *this; }
protected:
DualPointAndDataInfo< Dim , Real , Data , T , D > _dpInfo[1<<Dim];
unsigned int _size;
};
////////////////////////////
// The virtual integrator //
////////////////////////////
struct BaseFEMIntegrator
{
template< typename TDegreePack > struct System{};
template< typename TDegreePack > struct RestrictionProlongation{};
template< typename TDegreePack , typename CDegreePack , unsigned int CDim > struct Constraint{};
template< typename TDegreePack > struct SystemConstraint{};
template< typename TDegreePack > struct PointEvaluator{};
protected:
template< unsigned int Degree , unsigned int ... Degrees >
static typename std::enable_if< sizeof ... ( Degrees )==0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] )
{
int begin , end;
BSplineSupportSizes< Degree >::InteriorSupportedSpan( depth , begin , end );
return off[0]>=begin && off[0]<end;
}
template< unsigned int Degree , unsigned int ... Degrees >
static typename std::enable_if< sizeof ... ( Degrees )!=0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] )
{
int begin , end;
BSplineSupportSizes< Degree >::InteriorSupportedSpan( depth , begin , end );
return ( off[0]>=begin && off[0]<end ) && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , off+1 );
}
template< unsigned int Degree , unsigned int ... Degrees >
static typename std::enable_if< sizeof ... ( Degrees )==0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] , const double begin[] , const double end[] )
{
int res = 1<<depth;
double b = ( 0. + off[0] + BSplineSupportSizes< Degree >::SupportStart ) / res;
double e = ( 1. + off[0] + BSplineSupportSizes< Degree >::SupportEnd ) / res;
return b>=begin[0] && e<=end[0];
}
template< unsigned int Degree , unsigned int ... Degrees >
static typename std::enable_if< sizeof ... ( Degrees )!=0 , bool >::type _IsInteriorlySupported( UIntPack< Degree , Degrees ... > , unsigned int depth , const int off[] , const double begin[] , const double end[] )
{
int res = 1<<depth;
double b = ( 0. + off[0] + BSplineSupportSizes< Degree >::SupportStart ) / res;
double e = ( 1. + off[0] + BSplineSupportSizes< Degree >::SupportEnd ) / res;
return b>=begin[0] && e<=end[0] && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , off+1 , begin+1 , end+1 );
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )==0 >::type _InteriorOverlappedSpan( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int depth , int begin[] , int end[] )
{
BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin[0] , end[0] );
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )!=0 >::type _InteriorOverlappedSpan( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int depth , int begin[] , int end[] )
{
BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin[0] , end[0] );
_InteriorOverlappedSpan( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , begin+1 , end+1 );
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )==0 , bool >::type _IsInteriorlyOverlapped( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] )
{
int begin , end;
BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin , end );
return off[0]>= begin && off[0]<end;
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )!=0 , bool >::type _IsInteriorlyOverlapped( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] )
{
int begin , end;
BSplineIntegrationData< FEMDegreeAndBType< Degree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< Degree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin , end );
return ( off[0]>=begin && off[0]<end ) && _IsInteriorlyOverlapped( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , off+1 );
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )==0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] , int start[] , int end[] )
{
const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ off[0] & 1 ] - OverlapStart;
end [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd [ off[0] & 1 ] - OverlapStart + 1;
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )!=0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , unsigned int depth , const int off[] , int start[] , int end[] )
{
const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ off[0] & 1 ] - OverlapStart;
end [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd [ off[0] & 1 ] - OverlapStart + 1;
_ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , off+1 , start+1 , end+1 );
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )==0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int corner , int start[] , int end[] )
{
const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ corner & 1 ] - OverlapStart;
end [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd [ corner & 1 ] - OverlapStart + 1;
}
template< unsigned int Degree1 , unsigned int ... Degrees1 , unsigned int Degree2 , unsigned int ... Degrees2 >
static typename std::enable_if< sizeof ... ( Degrees1 )!=0 >::type _ParentOverlapBounds( UIntPack< Degree1 , Degrees1 ... > , UIntPack< Degree2 , Degrees2 ... > , int corner , int start[] , int end[] )
{
const int OverlapStart = BSplineOverlapSizes< Degree1 , Degree2 >::OverlapStart;
start[0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapStart[ corner & 1 ] - OverlapStart;
end [0] = BSplineOverlapSizes< Degree1 , Degree2 >::ParentOverlapEnd [ corner & 1 ] - OverlapStart + 1;
_ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , corner>>1 , start+1 , end+1 );
}
public:
template< unsigned int ... Degrees >
static bool IsInteriorlySupported( UIntPack< Degrees ... > , int depth , const int offset[] ){ return depth>=0 && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , offset ); }
template< unsigned int ... Degrees >
static bool IsInteriorlySupported( UIntPack< Degrees ... > , int depth , const int offset[] , const double begin[] , const double end[] ){ return depth>=0 && _IsInteriorlySupported( UIntPack< Degrees ... >() , depth , offset , begin , end ); }
template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
static void InteriorOverlappedSpan( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int depth , int begin[] , int end[] )
{
static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
_InteriorOverlappedSpan( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , begin , end );
}
template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
static bool IsInteriorlyOverlapped( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int depth , const int offset[] )
{
static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
return depth>=0 && _IsInteriorlyOverlapped( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , offset );
}
template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
static void ParentOverlapBounds( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int depth , const int offset[] , int start[] , int end[] )
{
static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
if( depth>0 ) _ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , depth , offset , start , end );
}
template< unsigned int ... Degrees1 , unsigned int ... Degrees2 >
static void ParentOverlapBounds( UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , int corner , int start[] , int end[] )
{
static_assert( sizeof ... ( Degrees1 ) == sizeof ... ( Degrees2 ) , "[ERROR] Dimensions don't match" );
_ParentOverlapBounds( UIntPack< Degrees1 ... >() , UIntPack< Degrees2 ... >() , corner , start , end );
}
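// [NOTE] A minimal usage sketch of the public queries above (hypothetical 3D, degree-2 setup;
// offsets are per-dimension B-spline indices at the given depth):
//   int off[3] = { 3 , 4 , 5 } , begin[3] , end[3];
//   bool interior = IsInteriorlySupported( IsotropicUIntPack< 3 , 2 >() , depth , off );
//   InteriorOverlappedSpan( IsotropicUIntPack< 3 , 2 >() , IsotropicUIntPack< 3 , 2 >() , depth , begin , end );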
template< unsigned int Dim >
struct PointEvaluatorState
{
virtual double value( const int offset[] , const unsigned int d[] ) const = 0;
virtual double subValue( const int offset[] , const unsigned int d[] ) const = 0;
template< class Real , typename DerivativeType >
Point< Real , DerivativeType::Size > dValues( const int offset[] ) const
{
Point< Real , DerivativeType::Size > v;
unsigned int _d[Dim];
for( int d=0 ; d<DerivativeType::Size ; d++ )
{
DerivativeType::Factor( d , _d );
v[d] = (Real)value( offset , _d );
}
return v;
}
template< class Real , typename DerivativeType >
Point< Real , DerivativeType::LastDerivative+1 > partialDotDValues( Point< Real , DerivativeType::Size > v , const int offset[] ) const
{
Point< Real , DerivativeType::LastDerivative+1 > dot;
unsigned int _d[Dim];
for( int d=0 ; d<DerivativeType::Size ; d++ )
{
DerivativeType::Factor( d , _d );
dot[ _d[Dim-1] ] += (Real)( subValue( offset , _d ) * v[d] );
}
return dot;
}
};
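// [NOTE] PointEvaluatorState::dValues gathers all mixed partial derivatives (enumerated by
// DerivativeType) of the function indexed by "offset" at the evaluation point stored in the
// state, while partialDotDValues dots the supplied coefficients against the sub-dimensional
// values, binned by the derivative order in the last dimension. Both rely on
// DerivativeType::Factor to unpack a flat derivative index into per-dimension derivative counts.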
template< unsigned int ... TDegrees >
struct PointEvaluator< UIntPack< TDegrees ... > >
{
static const unsigned int Dim = sizeof ... ( TDegrees );
};
template< unsigned int ... TDegrees >
struct RestrictionProlongation< UIntPack< TDegrees ... > >
{
virtual void init( void ){ }
virtual double upSampleCoefficient( const int pOff[] , const int cOff[] ) const = 0;
typedef DynamicWindow< double , UIntPack< ( - BSplineSupportSizes< TDegrees >::DownSample0Start + BSplineSupportSizes< TDegrees >::DownSample1End + 1 ) ... > > DownSampleStencil;
struct UpSampleStencil : public DynamicWindow< double , UIntPack< BSplineSupportSizes< TDegrees >::UpSampleSize ... > > { };
struct DownSampleStencils : public DynamicWindow< DownSampleStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > > { };
void init( int highDepth ){ _highDepth = highDepth ; init(); }
void setStencil ( UpSampleStencil & stencil ) const;
void setStencils( DownSampleStencils& stencils ) const;
int highDepth( void ) const { return _highDepth; }
protected:
int _highDepth;
};
template< unsigned int ... TDegrees >
struct System< UIntPack< TDegrees ... > >
{
virtual void init( void ){ }
virtual double ccIntegrate( const int off1[] , const int off2[] ) const = 0;
virtual double pcIntegrate( const int off1[] , const int off2[] ) const = 0;
virtual bool vanishesOnConstants( void ) const { return false; }
virtual RestrictionProlongation< UIntPack< TDegrees ... > >& restrictionProlongation( void ) = 0;
struct CCStencil : public DynamicWindow< double , UIntPack< BSplineOverlapSizes< TDegrees , TDegrees >::OverlapSize ... > >{ };
#ifdef SHOW_WARNINGS
#pragma message ( "[WARNING] Why are the parent/child stencils so big?" )
#endif // SHOW_WARNINGS
struct PCStencils : public DynamicWindow< CCStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > >{ };
void init( int highDepth ){ _highDepth = highDepth ; init(); }
template< bool IterateFirst > void setStencil ( CCStencil & stencil ) const;
template< bool IterateFirst > void setStencils( PCStencils& stencils ) const;
int highDepth( void ) const { return _highDepth; }
protected:
int _highDepth;
};
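// [NOTE] In the System/Constraint interfaces, "cc" integrates two functions at the same (child)
// depth, "pc" integrates a parent-depth function against a child-depth one, and "cp" the
// reverse (cf. the IntegrationType enum in FEMIntegrator::Constraint below).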
template< unsigned int ... TDegrees , unsigned int ... CDegrees , unsigned int CDim >
struct Constraint< UIntPack< TDegrees ... > , UIntPack< CDegrees ... > , CDim >
{
static_assert( sizeof...(TDegrees)==sizeof...(CDegrees) , "[ERROR] BaseFEMIntegrator::Constraint: Test and constraint dimensions don't match" );
virtual void init( void ){ ; }
virtual Point< double , CDim > ccIntegrate( const int off1[] , const int off2[] ) const = 0;
virtual Point< double , CDim > pcIntegrate( const int off1[] , const int off2[] ) const = 0;
virtual Point< double , CDim > cpIntegrate( const int off1[] , const int off2[] ) const = 0;
virtual RestrictionProlongation< UIntPack< TDegrees ... > >& tRestrictionProlongation( void ) = 0;
virtual RestrictionProlongation< UIntPack< CDegrees ... > >& cRestrictionProlongation( void ) = 0;
struct CCStencil : public DynamicWindow< Point< double , CDim > , UIntPack< BSplineOverlapSizes< TDegrees , CDegrees >::OverlapSize ... > >{ };
#ifdef SHOW_WARNINGS
#pragma message ( "[WARNING] Why are the parent/child stencils so big?" )
#endif // SHOW_WARNINGS
struct PCStencils : public DynamicWindow< CCStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > >{ };
struct CPStencils : public DynamicWindow< CCStencil , IsotropicUIntPack< sizeof ... ( TDegrees ) , 2 > >{ };
void init( int highDepth ){ _highDepth = highDepth ; init(); }
template< bool IterateFirst > void setStencil ( CCStencil & stencil ) const;
template< bool IterateFirst > void setStencils( PCStencils& stencils ) const;
template< bool IterateFirst > void setStencils( CPStencils& stencils ) const;
int highDepth( void ) const { return _highDepth; }
protected:
int _highDepth;
};
template< unsigned int ... TDegrees >
struct SystemConstraint< UIntPack< TDegrees ... > > : public Constraint< UIntPack< TDegrees ... > , UIntPack< TDegrees ... > , 1 >
{
typedef Constraint< UIntPack< TDegrees ... > , UIntPack< TDegrees ... > , 1 > Base;
SystemConstraint( System< UIntPack< TDegrees ... > >& sys ) : _sys( sys ){;}
void init( void ){ _sys.init( Base::highDepth() ) ; _sys.init(); }
Point< double , 1 > ccIntegrate( const int off1[] , const int off2[] ) const{ return Point< double , 1 >( _sys.ccIntegrate( off1 , off2 ) ); }
Point< double , 1 > pcIntegrate( const int off1[] , const int off2[] ) const{ return Point< double , 1 >( _sys.pcIntegrate( off1 , off2 ) ); }
Point< double , 1 > cpIntegrate( const int off1[] , const int off2[] ) const{ return Point< double , 1 >( _sys.pcIntegrate( off2 , off1 ) ); }
RestrictionProlongation< UIntPack< TDegrees ... > >& tRestrictionProlongation( void ){ return _sys.restrictionProlongation(); }
RestrictionProlongation< UIntPack< TDegrees ... > >& cRestrictionProlongation( void ){ return _sys.restrictionProlongation(); }
protected:
System< UIntPack< TDegrees ... > >& _sys;
};
};
/////////////////////////////////////////////////
// An implementation of the virtual integrator //
/////////////////////////////////////////////////
struct FEMIntegrator
{
protected:
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )==0 , bool >::type _IsValidFEMNode( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] )
{
return !BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] );
}
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )!=0 , bool >::type _IsValidFEMNode( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] )
{
return !BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] ) && _IsValidFEMNode( UIntPack< FEMSigs ... >() , depth , offset+1 );
}
template< unsigned int FEMSig , unsigned ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )==0 , bool >::type _IsOutOfBounds( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] )
{
return BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] );
}
template< unsigned int FEMSig , unsigned ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )!=0 , bool >::type _IsOutOfBounds( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] )
{
return BSplineEvaluationData< FEMSig >::OutOfBounds( depth , offset[0] ) || _IsOutOfBounds( UIntPack< FEMSigs ... >() , depth , offset+1 );
}
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )==0 >::type _BSplineBegin( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int begin[] )
{
begin[0] = BSplineEvaluationData< FEMSig >::Begin( depth );
}
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )!=0 >::type _BSplineBegin( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int begin[] )
{
begin[0] = BSplineEvaluationData< FEMSig >::Begin( depth ) ; _BSplineBegin( UIntPack< FEMSigs ... >() , depth , begin+1 );
}
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )==0 >::type _BSplineEnd( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int end[] )
{
end[0] = BSplineEvaluationData< FEMSig >::End( depth );
}
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )!=0 >::type _BSplineEnd( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , int end[] )
{
end[0] = BSplineEvaluationData< FEMSig >::End( depth ) ; _BSplineEnd( UIntPack< FEMSigs ... >() , depth , end+1 );
}
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )==0 , double >::type _Integral( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] , const double begin[] , const double end[] )
{
return BSplineEvaluationData< FEMSig >::Integral( depth , offset[0] , begin[0] , end[0] , 0 );
}
template< unsigned int FEMSig , unsigned int ... FEMSigs >
static typename std::enable_if< sizeof ... ( FEMSigs )!=0 , double >::type _Integral( UIntPack< FEMSig , FEMSigs ... > , unsigned int depth , const int offset[] , const double begin[] , const double end[] )
{
return BSplineEvaluationData< FEMSig >::Integral( depth , offset[0] , begin[0] , end[0] , 0 ) * _Integral( UIntPack< FEMSigs ... >() , depth , offset+1 , begin+1 , end+1 );
}
public:
template< unsigned int ... FEMSigs >
static double Integral( UIntPack< FEMSigs ... > , int depth , const int offset[] , const double begin[] , const double end[] )
{
if( depth<0 ) return 0;
else return _Integral( UIntPack< FEMSigs ... >() , depth , offset , begin , end );
}
template< unsigned int ... FEMSigs > static bool IsValidFEMNode( UIntPack< FEMSigs ... > , int depth , const int offset[] ){ return _IsValidFEMNode( UIntPack< FEMSigs ... >() , depth , offset ); }
template< unsigned int ... FEMSigs > static bool IsOutOfBounds( UIntPack< FEMSigs ... > , int depth , const int offset[] ){ return depth<0 || _IsOutOfBounds( UIntPack< FEMSigs ... >() , depth , offset ); }
template< unsigned int ... FEMSigs > static void BSplineBegin( UIntPack< FEMSigs ... > , int depth , int begin[] ){ if( depth>=0 ) _BSplineBegin( UIntPack< FEMSigs ... >() , depth , begin ); }
template< unsigned int ... FEMSigs > static void BSplineEnd ( UIntPack< FEMSigs ... > , int depth , int end [] ){ if( depth>=0 ) _BSplineEnd ( UIntPack< FEMSigs ... >() , depth , end ); }
template< typename TSignatures , typename TDerivatives > struct System{};
template< typename TSignatures , typename TDerivatives , typename CSignatures , typename CDerivatives , unsigned int CDim > struct Constraint{};
template< typename TSignatures , typename TDerivatives , typename CSignatures , typename CDerivatives > struct ScalarConstraint{};
template< typename TSignatures > struct RestrictionProlongation{};
template< typename TSignatures , typename TDerivatives > struct PointEvaluator{};
template< typename TSignatures , typename TDerivatives > struct PointEvaluatorState{};
template< unsigned int ... TSignatures , unsigned int ... TDs >
struct PointEvaluatorState< UIntPack< TSignatures ... > , UIntPack< TDs ... > > : public BaseFEMIntegrator::template PointEvaluatorState< sizeof ... ( TSignatures ) >
{
static_assert( sizeof...(TSignatures)==sizeof...(TDs) , "[ERROR] Degree and derivative dimensions don't match" );
static_assert( UIntPack< FEMSignature< TSignatures >::Degree ... >::template Compare< UIntPack< TDs ... > >::GreaterThanOrEqual , "[ERROR] PointEvaluatorState: More derivatives than degrees" );
static const unsigned int Dim = sizeof...(TSignatures);
double value ( const int offset[] , const unsigned int derivatives[] ) const { return _value< Dim >( offset , derivatives ); }
double subValue( const int offset[] , const unsigned int derivatives[] ) const { return _value< Dim-1 >( offset , derivatives ); }
// Bypassing the "auto" keyword
template< unsigned int _Dim >
const double (*(values)( void ) const )[ UIntPack< TDs ... >::template Get< _Dim >()+1 ] { return std::template get< _Dim >( _oneDValues ).values; }
protected:
int _pointOffset[Dim];
template< unsigned int Degree , unsigned int D > struct _OneDValues
{
double values[ BSplineSupportSizes< Degree >::SupportSize ][ D+1 ];
double value( int dOff , unsigned int d ) const
{
if( dOff>=-BSplineSupportSizes< Degree >::SupportEnd && dOff<=-BSplineSupportSizes< Degree >::SupportStart && d<=D ) return values[ dOff+BSplineSupportSizes< Degree >::SupportEnd][d];
else return 0;
}
};
std::tuple< _OneDValues< FEMSignature< TSignatures >::Degree , TDs > ... > _oneDValues;
template< unsigned int MaxDim=Dim , unsigned int I=0 > typename std::enable_if< I==MaxDim , double >::type _value( const int off[] , const unsigned int d[] ) const { return 1.; }
template< unsigned int MaxDim=Dim , unsigned int I=0 > typename std::enable_if< I!=MaxDim , double >::type _value( const int off[] , const unsigned int d[] ) const { return std::get< I >( _oneDValues ).value( off[I]-_pointOffset[I] , d[I] ) * _value< MaxDim , I+1 >( off , d ); }
template< typename T1 , typename T2 > friend struct PointEvaluator;
};
template< unsigned int ... TSignatures , unsigned int ... TDs >
struct PointEvaluator< UIntPack< TSignatures ... > , UIntPack< TDs ... > > : public BaseFEMIntegrator::template PointEvaluator< UIntPack< FEMSignature< TSignatures >::Degree ... > >
{
static_assert( sizeof...(TSignatures)==sizeof...(TDs) , "[ERROR] PointEvaluator: Degree and derivative dimensions don't match" );
static_assert( UIntPack< FEMSignature< TSignatures >::Degree ... >::template Compare< UIntPack< TDs ... > >::GreaterThanOrEqual , "[ERROR] PointEvaluator: More derivatives than degrees" );
static const unsigned int Dim = sizeof ... ( TSignatures );
typedef typename BaseFEMIntegrator::template PointEvaluator< UIntPack< FEMSignature< TSignatures >::Degree ... > > Base;
PointEvaluator( unsigned int maxDepth ) : _maxDepth( maxDepth ) { _init(); }
template< unsigned int ... EDs >
void initEvaluationState( Point< double , Dim > p , unsigned int depth , PointEvaluatorState< UIntPack< TSignatures ... > , UIntPack< EDs ... > >& state ) const
{
unsigned int res = 1<<depth;
for( int d=0 ; d<Dim ; d++ ) state._pointOffset[d] = (int)( p[d] * res );
initEvaluationState( p , depth , state._pointOffset , state );
}
template< unsigned int ... EDs >
void initEvaluationState( Point< double , Dim > p , unsigned int depth , const int* offset , PointEvaluatorState< UIntPack< TSignatures ... > , UIntPack< EDs ... > >& state ) const
{
static_assert( UIntPack< TDs ... >::template Compare< UIntPack< EDs ... > >::GreaterThanOrEqual , "[ERROR] PointEvaluator::init: More evaluation derivatives than stored derivatives" );
for( int d=0 ; d<Dim ; d++ ) state._pointOffset[d] = (int)offset[d];
_initEvaluationState( UIntPack< TSignatures ... >() , UIntPack< EDs ... >() , &p[0] , depth , state );
}
protected:
unsigned int _maxDepth;
std::tuple< BSplineData< TSignatures , TDs > ... > _bSplineData;
template< unsigned int I=0 > typename std::enable_if< I==Dim >::type _init( void ){}
template< unsigned int I=0 > typename std::enable_if< I< Dim >::type _init( void ){ std::get< I >( _bSplineData ).reset( _maxDepth ) ; _init< I+1 >( ); }
template< unsigned int I , unsigned int TSig , unsigned int D , typename State >
void _setEvaluationState( const double* p , unsigned int depth , State& state ) const
{
static const int LeftSupportRadius = -BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportStart;
static const int LeftPointSupportRadius = BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportEnd ;
static const int RightSupportRadius = BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportEnd ;
static const int RightPointSupportRadius = -BSplineSupportSizes< FEMSignature< TSig >::Degree >::SupportStart;
for( int s=-LeftPointSupportRadius ; s<=RightPointSupportRadius ; s++ )
{
int pIdx = state._pointOffset[I];
int fIdx = state._pointOffset[I]+s;
double _p = p[I];
const Polynomial< FEMSignature< TSig >::Degree >* components = std::get< I >( _bSplineData )[depth].polynomialsAndOffset( _p , pIdx , fIdx );
for( int d=0 ; d<=D ; d++ ) std::get< I >( state._oneDValues ).values[ s+LeftPointSupportRadius ][d] = components[d]( _p );
}
}
template< typename State , unsigned int TSig , unsigned int ... TSigs , unsigned int D , unsigned int ... Ds >
typename std::enable_if< sizeof...(TSigs)==0 >::type _initEvaluationState( UIntPack< TSig , TSigs ... > , UIntPack< D , Ds ... > , const double* p , unsigned int depth , State& state ) const
{
_setEvaluationState< Dim-1 , TSig , D >( p , depth , state );
}
template< typename State , unsigned int TSig , unsigned int ... TSigs , unsigned int D , unsigned int ... Ds >
typename std::enable_if< sizeof...(TSigs)!=0 >::type _initEvaluationState( UIntPack< TSig , TSigs ... > , UIntPack< D , Ds ... > , const double* p , unsigned int depth , State& state ) const
{
_setEvaluationState< Dim-1-sizeof...(TSigs) , TSig , D >( p , depth , state );
_initEvaluationState( UIntPack< TSigs ... >() , UIntPack< Ds ... >() , p , depth , state );
}
};
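// [NOTE] A minimal evaluation sketch (hypothetical aliases: Sigs is a UIntPack of FEM
// signatures and Ds a matching UIntPack of derivative orders):
//   PointEvaluator< Sigs , Ds > evaluator( maxDepth );
//   PointEvaluatorState< Sigs , Ds > state;
//   evaluator.initEvaluationState( p , depth , state );
//   double v = state.value( offset , derivatives );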
template< unsigned int ... TSignatures >
struct RestrictionProlongation< UIntPack< TSignatures ... > > : public BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > >
{
static const unsigned int Dim = sizeof ... ( TSignatures );
typedef typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > > Base;
double upSampleCoefficient( const int pOff[] , const int cOff[] ) const { return _coefficient( pOff , cOff ); }
void init( unsigned int depth ){ Base::init( depth ); }
void init( void ){ _init( Base::highDepth() ); }
protected:
std::tuple< typename BSplineEvaluationData< TSignatures >::UpSampleEvaluator ... > _upSamplers;
template< unsigned int D=0 > typename std::enable_if< D==Dim >::type _init( int highDepth ){ }
template< unsigned int D=0 > typename std::enable_if< D< Dim >::type _init( int highDepth ){ std::get< D >( _upSamplers ).set( highDepth-1 ) ; _init< D+1 >( highDepth ); }
template< unsigned int D=0 > typename std::enable_if< D==Dim , double >::type _coefficient( const int pOff[] , const int cOff[] ) const { return 1.; }
template< unsigned int D=0 > typename std::enable_if< D< Dim , double >::type _coefficient( const int pOff[] , const int cOff[] ) const { return _coefficient< D+1 >( pOff , cOff ) * std::get< D >( _upSamplers ).value( pOff[D] , cOff[D] ); }
};
template< unsigned int ... TSignatures , unsigned int ... TDerivatives , unsigned int ... CSignatures , unsigned int ... CDerivatives , unsigned int CDim >
struct Constraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > , CDim > : public BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< CSignatures >::Degree ... > , CDim >
{
static_assert( sizeof ... ( TSignatures ) == sizeof ... ( CSignatures ) , "[ERROR] Test signatures and constraint signatures must have the same dimension" );
static_assert( sizeof ... ( TSignatures ) == sizeof ... ( TDerivatives ) , "[ERROR] Test signatures and derivatives must have the same dimension" );
static_assert( sizeof ... ( CSignatures ) == sizeof ... ( CDerivatives ) , "[ERROR] Constraint signatures and derivatives must have the same dimension" );
static_assert( UIntPack< FEMSignature< TSignatures >::Degree ... >::template Compare< UIntPack< TDerivatives ... > >::GreaterThanOrEqual , "[ERROR] Test functions cannot have more derivatives than the degree" );
static_assert( UIntPack< FEMSignature< CSignatures >::Degree ... >::template Compare< UIntPack< CDerivatives ... > >::GreaterThanOrEqual , "[ERROR] Constraint functions cannot have more derivatives than the degree" );
static const unsigned int Dim = sizeof ... ( TSignatures );
typedef typename BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< CSignatures >::Degree ... > , CDim > Base;
static const unsigned int TDerivativeSize = TensorDerivatives< UIntPack< TDerivatives ... > >::Size;
static const unsigned int CDerivativeSize = TensorDerivatives< UIntPack< CDerivatives ... > >::Size;
static inline void TFactorDerivatives( unsigned int idx , unsigned int d[ Dim ] ){ TensorDerivatives< UIntPack< TDerivatives ... > >::Factor( idx , d ); }
static inline void CFactorDerivatives( unsigned int idx , unsigned int d[ Dim ] ){ TensorDerivatives< UIntPack< CDerivatives ... > >::Factor( idx , d ); }
static inline unsigned int TDerivativeIndex( const unsigned int d[ Dim ] ){ return TensorDerivatives< UIntPack< TDerivatives ... > >::Index( d ); }
static inline unsigned int CDerivativeIndex( const unsigned int d[ Dim ] ){ return TensorDerivatives< UIntPack< CDerivatives ... > >::Index( d ); }
Matrix< double , TDerivativeSize , CDerivativeSize > weights[CDim];
Point< double , CDim > ccIntegrate( const int off1[] , const int off2[] ) const { return _integrate( INTEGRATE_CHILD_CHILD , off1 , off2 ); }
Point< double , CDim > pcIntegrate( const int off1[] , const int off2[] ) const { return _integrate( INTEGRATE_PARENT_CHILD , off1 , off2 ); }
Point< double , CDim > cpIntegrate( const int off1[] , const int off2[] ) const { return _integrate( INTEGRATE_CHILD_PARENT , off1 , off2 ); }
void init( unsigned int depth ){ Base::init( depth ); }
void init( void )
{
_init( Base::highDepth() );
_weightedIndices.resize(0);
for( unsigned int d1=0 ; d1<TDerivativeSize ; d1++ ) for( unsigned int d2=0 ; d2<CDerivativeSize ; d2++ )
{
_WeightedIndices w(d1,d2);
for( unsigned int c=0 ; c<CDim ; c++ ) if( weights[c](d1,d2)>0 ) w.indices.push_back( std::pair< unsigned int , double >( c , weights[c](d1,d2) ) );
if( w.indices.size() ) _weightedIndices.push_back(w);
}
}
typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > >& tRestrictionProlongation( void ){ return _tRestrictionProlongation; }
typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< CSignatures >::Degree ... > >& cRestrictionProlongation( void ){ return _cRestrictionProlongation; }
protected:
RestrictionProlongation< UIntPack< TSignatures ... > > _tRestrictionProlongation;
RestrictionProlongation< UIntPack< CSignatures ... > > _cRestrictionProlongation;
struct _WeightedIndices
{
_WeightedIndices( unsigned int _d1=0 , unsigned int _d2=0 ) : d1(_d1) , d2(_d2) { ; }
unsigned int d1 , d2;
std::vector< std::pair< unsigned int , double > > indices;
};
std::vector< _WeightedIndices > _weightedIndices;
enum IntegrationType
{
INTEGRATE_CHILD_CHILD ,
INTEGRATE_PARENT_CHILD ,
INTEGRATE_CHILD_PARENT
};
template< unsigned int _TSig , unsigned int _TDerivatives , unsigned int _CSig , unsigned int _CDerivatives >
struct _Integrators
{
typename BSplineIntegrationData< _TSig , _CSig >::FunctionIntegrator::template Integrator< _TDerivatives , _CDerivatives > ccIntegrator;
typename BSplineIntegrationData< _TSig , _CSig >::FunctionIntegrator::template ChildIntegrator< _TDerivatives , _CDerivatives > pcIntegrator;
typename BSplineIntegrationData< _CSig , _TSig >::FunctionIntegrator::template ChildIntegrator< _CDerivatives , _TDerivatives > cpIntegrator;
};
std::tuple< _Integrators< TSignatures , TDerivatives , CSignatures , CDerivatives > ... > _integrators;
template< unsigned int D=0 >
typename std::enable_if< D==Dim >::type _init( int depth ){ ; }
template< unsigned int D=0 >
typename std::enable_if< D< Dim >::type _init( int depth )
{
std::get< D >( _integrators ).ccIntegrator.set( depth );
if( depth ) std::get< D >( _integrators ).pcIntegrator.set( depth-1 ) , std::get< D >( _integrators ).cpIntegrator.set( depth-1 );
_init< D+1 >( depth );
}
template< unsigned int D=0 >
typename std::enable_if< D==Dim , double >::type _integral( IntegrationType iType , const int off1[] , const int off2[] , const unsigned int d1[] , const unsigned int d2[] ) const { return 1.; }
template< unsigned int D=0 >
typename std::enable_if< D< Dim , double >::type _integral( IntegrationType iType , const int off1[] , const int off2[] , const unsigned int d1[] , const unsigned int d2[] ) const
{
double remainingIntegral = _integral< D+1 >( iType , off1 , off2 , d1 , d2 );
switch( iType )
{
case INTEGRATE_CHILD_CHILD: return std::get< D >( _integrators ).ccIntegrator.dot( off1[D] , off2[D] , d1[D] , d2[D] ) * remainingIntegral;
case INTEGRATE_PARENT_CHILD: return std::get< D >( _integrators ).pcIntegrator.dot( off1[D] , off2[D] , d1[D] , d2[D] ) * remainingIntegral;
case INTEGRATE_CHILD_PARENT: return std::get< D >( _integrators ).cpIntegrator.dot( off2[D] , off1[D] , d2[D] , d1[D] ) * remainingIntegral;
default: ERROR_OUT( "Undefined integration type" );
}
return 0;
}
Point< double , CDim > _integrate( IntegrationType iType , const int off1[] , const int off2[] ) const;
};
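// [NOTE] Because the FEM basis is a tensor product, _integral factors the Dim-dimensional
// integral into a product of 1D integrals, one per dimension:
//   \int B_{off1}^{(d1)} B_{off2}^{(d2)} = \prod_D \int b_{off1[D]}^{(d1[D])} b_{off2[D]}^{(d2[D])}
// with the pc/cp variants pairing functions across adjacent depths.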
template< unsigned int ... TSignatures , unsigned int ... TDerivatives , unsigned int ... CSignatures , unsigned int ... CDerivatives >
struct ScalarConstraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > > : public Constraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > , 1 >
{
static const unsigned int Dim = sizeof ... ( TSignatures );
typedef typename BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< CSignatures >::Degree ... > , 1 > Base;
typedef Constraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< CSignatures ... > , UIntPack< CDerivatives ... > , 1 > FullConstraint;
using FullConstraint::weights;
// [NOTE] We define the constructor using a recursive function call to take into account multiplicity (e.g. so that d^2/dxdy and d^2/dydx each contribute)
ScalarConstraint( const std::initializer_list< double >& w )
{
std::function< void ( unsigned int[] , const double[] , unsigned int ) > SetDerivativeWeights = [&]( unsigned int derivatives[Dim] , const double w[] , unsigned int d )
{
unsigned int idx1 = FullConstraint::TDerivativeIndex( derivatives ) , idx2 = FullConstraint::CDerivativeIndex( derivatives );
weights[0][idx1][idx2] += w[0];
if( d>0 ) for( int dd=0 ; dd<Dim ; dd++ ){ derivatives[dd]++ ; SetDerivativeWeights( derivatives , w+1 , d-1 ) ; derivatives[dd]--; }
};
static const unsigned int DMax = std::min< unsigned int >( UIntPack< TDerivatives ... >::Min() , UIntPack< CDerivatives ... >::Min() );
unsigned int derivatives[Dim];
double _w[DMax+1];
memset( _w , 0 , sizeof(_w) );
{
unsigned int dd=0;
for( typename std::initializer_list< double >::const_iterator iter=w.begin() ; iter!=w.end() && dd<=DMax ; dd++ , iter++ ) _w[dd] = *iter;
}
for( int d=0 ; d<Dim ; d++ ) derivatives[d] = 0;
if( w.size() ) SetDerivativeWeights( derivatives , _w , std::min< unsigned int >( DMax+1 , (unsigned int)w.size() )-1 );
}
};
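// [NOTE] Illustrative use of the initializer list (a sketch, not exhaustive): the k-th entry
// weights the dot product of k-th order derivatives, with SetDerivativeWeights adding one copy
// per ordering of the mixed partials. E.g., assuming gradient derivatives are stored:
//   ScalarConstraint< ... >( { 0. , 1. } ); // zero weight on values, unit weight on gradients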
template< unsigned int ... TSignatures , unsigned int ... TDerivatives >
struct System< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > > : public BaseFEMIntegrator::template System< UIntPack< FEMSignature< TSignatures >::Degree... > >
{
static_assert( sizeof ... ( TSignatures ) == sizeof ... ( TDerivatives ) , "[ERROR] Test signatures and derivatives must have the same dimension" );
static const unsigned int Dim = sizeof ... ( TSignatures );
typedef typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< TSignatures >::Degree... > > Base;
System( const std::initializer_list< double >& w ) : _sc( w ){ ; }
void init( unsigned int depth ){ Base::init( depth ); }
void init( void ){ ( (BaseFEMIntegrator::template Constraint< UIntPack< FEMSignature< TSignatures >::Degree ... > , UIntPack< FEMSignature< TSignatures >::Degree ... > , 1 >&)_sc ).init( BaseFEMIntegrator::template System< UIntPack< FEMSignature< TSignatures >::Degree... > >::_highDepth ); }
double ccIntegrate( const int off1[] , const int off2[] ) const { return _sc.ccIntegrate( off1 , off2 )[0]; }
double pcIntegrate( const int off1[] , const int off2[] ) const { return _sc.pcIntegrate( off1 , off2 )[0]; }
bool vanishesOnConstants( void ) const { return _sc.weights[0][0][0]==0; }
typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< FEMSignature< TSignatures >::Degree ... > >& restrictionProlongation( void ){ return _sc.tRestrictionProlongation(); }
protected:
ScalarConstraint< UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > , UIntPack< TSignatures ... > , UIntPack< TDerivatives ... > > _sc;
};
};
//////////////////////////////////////////
template< unsigned int Dim > inline void SetGhostFlag( RegularTreeNode< Dim , FEMTreeNodeData >* node , bool flag ){ if( node && node->parent ) node->parent->nodeData.setGhostFlag( flag ); }
template< unsigned int Dim > inline bool GetGhostFlag( const RegularTreeNode< Dim , FEMTreeNodeData >* node ){ return node==NULL || node->parent==NULL || node->parent->nodeData.getGhostFlag( ); }
template< unsigned int Dim > inline bool IsActiveNode( const RegularTreeNode< Dim , FEMTreeNodeData >* node ){ return !GetGhostFlag< Dim >( node ); }
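// [NOTE] Ghost flags are stored at brood granularity: SetGhostFlag flags the node's parent, so
// all siblings share the flag, and a node is "active" exactly when it is non-NULL, has a
// parent, and that parent's ghost flag is clear (in particular, the root is treated as a ghost).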
template< unsigned int Dim , class Real , class Vertex > struct IsoSurfaceExtractor;
template< unsigned int Dim , class Data >
struct NodeSample
{
RegularTreeNode< Dim , FEMTreeNodeData >* node;
Data data;
};
template< unsigned int Dim , class Real >
struct NodeAndPointSample
{
RegularTreeNode< Dim , FEMTreeNodeData >* node;
ProjectiveData< Point< Real , Dim > , Real > sample;
// bool is_confidence_point; // modified by dojo
NodeAndPointSample( void ){ /* is_confidence_point = true; */ } // modified by dojo
};
template< unsigned int Dim , class Real > using NodeSimplices = NodeSample< Dim , std::vector< Simplex< Real , Dim , Dim-1 > > >;
template< typename T > struct WindowLoopData{ };
template< unsigned int ... Sizes >
struct WindowLoopData< UIntPack< Sizes ... > >
{
static const int Dim = sizeof ... ( Sizes );
unsigned int size[1<<Dim];
unsigned int indices[1<<Dim][ WindowSize< UIntPack< Sizes ... > >::Size ];
WindowLoopData( std::function< void ( int c , int* , int* ) > boundsFunction )
{
int start[Dim] , end[Dim];
for( int c=0 ; c<(1<<Dim) ; c++ )
{
size[c] = 0;
boundsFunction( c , start , end );
unsigned int idx[Dim];
WindowLoop< Dim >::Run
(
start , end ,
[&]( int d , int i ){ idx[d] = i; } ,
[&]( void ){ indices[c][ size[c]++ ] = GetWindowIndex( UIntPack< Sizes ... >() , idx ); }
);
}
}
};
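// [NOTE] WindowLoopData precomputes, for each of the 2^Dim child corners, the flattened window
// indices visited by that corner's iteration bounds, so inner loops can stream over
// indices[c][0..size[c]) instead of re-running the multi-dimensional WindowLoop each time.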
template< class Data >
void AddAtomic( Data& a , const Data& b )
{
#pragma omp critical
a += b;
}
template< class Real , unsigned int Dim >
void AddAtomic( Point< Real , Dim >& a , const Point< Real , Dim >& b )
{
for( int d=0 ; d<Dim ; d++ ) AddAtomic( a[d] , b[d] );
}
void AddAtomic( float& a , const float& b )
{
#pragma omp atomic
a += b;
}
void AddAtomic( double& a , const double& b )
{
#pragma omp atomic
a += b;
}
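// [NOTE] The float/double overloads of AddAtomic can use the cheaper "#pragma omp atomic";
// arbitrary Data types fall back to the (more expensive) critical section, and Point-valued
// additions are performed coordinate-by-coordinate, dispatching to the scalar overloads.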
template< class Data >
bool IsZero( const Data& data ){ return false; }
template< class Real , unsigned int Dim >
bool IsZero( const Point< Real , Dim >& d )
{
bool zero = true;
for( int i=0 ; i<Dim ; i++ ) zero &= (d[i]==0);
return zero;
}
bool IsZero( const float& f ){ return f==0.f; }
bool IsZero( const double& f ){ return f==0.; }
template< unsigned int Dim , class Real >
class FEMTree
{
public:
typedef RegularTreeNode< Dim , FEMTreeNodeData > FEMTreeNode;
Allocator< FEMTreeNode >* nodeAllocator;
bool aux_vfield_mode; // modified by dojo
protected:
template< unsigned int _Dim , class _Real , class Vertex > friend struct IsoSurfaceExtractor;
std::atomic< int > _nodeCount;
void _nodeInitializer( FEMTreeNode& node ){ node.nodeData.nodeIndex = _nodeCount++; }
struct _NodeInitializer
{
FEMTree& femTree;
_NodeInitializer( FEMTree& f ) : femTree(f){;}
void operator() ( FEMTreeNode& node ){ femTree._nodeInitializer( node ); }
};
public:
typedef int LocalDepth;
typedef int LocalOffset[Dim];
int nodeCount( void ) const { return _nodeCount; }
typedef NodeAndPointSample< Dim , Real > PointSample;
typedef typename FEMTreeNode::template NeighborKey< IsotropicUIntPack< Dim , 1 > , IsotropicUIntPack< Dim , 1 > > OneRingNeighborKey;
typedef typename FEMTreeNode::template ConstNeighborKey< IsotropicUIntPack< Dim , 1 > , IsotropicUIntPack< Dim , 1 > > ConstOneRingNeighborKey;
typedef typename FEMTreeNode::template Neighbors< IsotropicUIntPack< Dim , 3 > > OneRingNeighbors;
typedef typename FEMTreeNode::template ConstNeighbors< IsotropicUIntPack< Dim , 3 > > ConstOneRingNeighbors;
template< typename FEMDegreePack > using BaseSystem = typename BaseFEMIntegrator::template System< FEMDegreePack >;
template< typename FEMSigPack , typename DerivativePack > using PointEvaluator = typename FEMIntegrator::template PointEvaluator< FEMSigPack , DerivativePack >;
template< typename FEMSigPack , typename DerivativePack > using PointEvaluatorState = typename FEMIntegrator::template PointEvaluatorState< FEMSigPack , DerivativePack >;
template< typename FEMDegreePack > using CCStencil = typename BaseSystem< FEMDegreePack >::CCStencil;
template< typename FEMDegreePack > using PCStencils = typename BaseSystem< FEMDegreePack >::PCStencils;
template< unsigned int ... FEMSigs > bool isValidFEMNode( UIntPack< FEMSigs ... > , const FEMTreeNode* node ) const;
bool isValidSpaceNode( const FEMTreeNode* node ) const;
const FEMTreeNode* leaf( Point< Real , Dim > p ) const;
FEMTreeNode* leaf( Point< Real , Dim > p , LocalDepth maxDepth=-1 );
// [NOTE] In the case that T != double, we require both operators() for computing the system dual
template< typename T , unsigned int PointD >
struct InterpolationInfo
{
virtual void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const = 0;
virtual Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const = 0;
virtual Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const = 0;
virtual Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const = 0;
virtual const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const = 0;
virtual bool constrainsDCTerm( void ) const = 0;
virtual ~InterpolationInfo( void ){}
DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIndex ){ return const_cast< DualPointInfo< Dim , Real , T , PointD >& >( ( ( const InterpolationInfo* )this )->operator[]( pointIndex ) ); }
};
template< unsigned int PointD >
struct InterpolationInfo< double , PointD >
{
virtual void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const = 0;
virtual Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const = 0;
virtual Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const = 0;
virtual const DualPointInfo< Dim , Real , double , PointD >& operator[]( size_t pointIdx ) const = 0;
virtual bool constrainsDCTerm( void ) const = 0;
virtual ~InterpolationInfo( void ){}
DualPointInfo< Dim , Real , double , PointD >& operator[]( size_t pointIndex ){ return const_cast< DualPointInfo< Dim , Real , double , PointD >& >( ( ( const InterpolationInfo* )this )->operator[]( pointIndex ) ); }
};
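// [NOTE] An InterpolationInfo exposes, per tree node, a contiguous range of dual points
// (range), the constraint dual at a point (operator()(idx)), and the system dual applied to
// derivative values (operator()(idx,dValues)). The double specialization exists because, when
// T==double, the T- and double-valued operator() overloads would otherwise be identical.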
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximatePointInterpolationInfo : public InterpolationInfo< T , PointD >
{
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = idx , end = idx+1;
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].position ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
ApproximatePointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
SparseNodeData< DualPointInfo< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximatePointInterpolationInfo< double , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
typedef double T;
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = idx , end = idx+1;
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].position ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
ApproximatePointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
SparseNodeData< DualPointInfo< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximatePointAndDataInterpolationInfo : public InterpolationInfo< T , PointD >
{
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = idx , end = idx+1;
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
ApproximatePointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
SparseNodeData< DualPointAndDataInfo< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximatePointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
typedef double T;
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = idx , end = idx+1;
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
ApproximatePointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
SparseNodeData< DualPointAndDataInfo< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointInterpolationInfo : public InterpolationInfo< T , PointD >
{
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size();
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return __iData(pointIdx); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).position ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).position , dValues ); }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).position , dValues ); }
ApproximateChildPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
static const unsigned int _Mask = (1<<Dim)-1;
SparseNodeData< DualPointInfoBrood< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
const DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointInterpolationInfo< double , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
typedef double T;
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size();
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return __iData(pointIdx); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).position ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).position , dValues ); }
ApproximateChildPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
static const unsigned int _Mask = (1<<Dim)-1;
SparseNodeData< DualPointInfoBrood< Dim , Real , T , PointD > , ZeroUIntPack< Dim > > _iData;
DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
const DualPointInfo< Dim , Real , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointAndDataInterpolationInfo : public InterpolationInfo< T , PointD >
{
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size();
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return __iData(pointIdx).pointInfo; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data , dValues ); }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data , dValues ); }
ApproximateChildPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
static const unsigned int _Mask = (1<<Dim)-1;
SparseNodeData< DualPointAndDataInfoBrood< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
const DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ApproximateChildPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
typedef double T;
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const
{
int idx = _iData.index( node );
if( idx<0 ) begin = end = 0;
else begin = (idx<<Dim) , end = (idx<<Dim) | _iData[idx].size();
}
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return __iData(pointIdx).pointInfo; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( __iData(pointIdx).pointInfo.position , __iData(pointIdx).data , dValues ); }
ApproximateChildPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
static const unsigned int _Mask = (1<<Dim)-1;
SparseNodeData< DualPointAndDataInfoBrood< Dim , Real , Data , T , PointD > , ZeroUIntPack< Dim > > _iData;
DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ){ return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
const DualPointAndDataInfo< Dim , Real , Data , T , PointD >& __iData( size_t pointIdx ) const { return _iData[ (int)(pointIdx>>Dim) ][ pointIdx & _Mask ]; }
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointInterpolationInfo : public InterpolationInfo< T , PointD >
{
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].position ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
ExactPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
void _init( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , bool noRescale );
std::vector< std::pair< int , int > > _sampleSpan;
std::vector< DualPointInfo< Dim , Real , T , PointD > > _iData;
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointInterpolationInfo< double , PointD , ConstraintDual , SystemDual > : public InterpolationInfo< double , PointD >
{
typedef double T;
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ]; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].position ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].position , dValues ); }
ExactPointInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
void _init( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , bool noRescale );
std::vector< std::pair< int , int > > _sampleSpan;
std::vector< DualPointInfo< Dim , Real , T , PointD > > _iData;
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct _ExactPointAndDataInterpolationInfo : public InterpolationInfo< T , PointD >
{
_ExactPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _constraintDual( constraintDual ) , _systemDual( systemDual ) , _constrainsDCTerm( constrainsDCTerm ) { }
protected:
void _init( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , bool noRescale );
std::vector< std::pair< int , int > > _sampleSpan;
std::vector< DualPointAndDataInfo< Dim , Real , Data , T , PointD > > _iData;
bool _constrainsDCTerm;
ConstraintDual _constraintDual;
SystemDual _systemDual;
friend class FEMTree< Dim , Real >;
};
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointAndDataInterpolationInfo : public _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >
{
using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_sampleSpan;
using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_constrainsDCTerm;
using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_iData;
using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_constraintDual;
using _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >::_systemDual;
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , T , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
Point< T , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< T , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
ExactPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm ) { }
};
template< typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
struct ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual > : public _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >
{
using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_sampleSpan;
using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_constrainsDCTerm;
using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_iData;
using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_constraintDual;
using _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >::_systemDual;
void range( const FEMTreeNode* node , size_t& begin , size_t& end ) const { begin = _sampleSpan[ node->nodeData.nodeIndex ].first , end = _sampleSpan[ node->nodeData.nodeIndex ].second; }
bool constrainsDCTerm( void ) const { return _constrainsDCTerm; }
const DualPointInfo< Dim , Real , double , PointD >& operator[]( size_t pointIdx ) const { return _iData[ (int)pointIdx ].pointInfo; }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx ) const { return _constraintDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data ); }
Point< double , CumulativeDerivatives< Dim , PointD >::Size > operator() ( size_t pointIdx , const Point< double , CumulativeDerivatives< Dim , PointD >::Size >& dValues ) const { return _systemDual( _iData[ (int)pointIdx ].pointInfo.position , _iData[ (int)pointIdx ].data , dValues ); }
ExactPointAndDataInterpolationInfo( ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm ) : _ExactPointAndDataInterpolationInfo< double , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm ) { }
};
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximatePointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* InitializeApproximatePointInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , int adaptiveExponent )
{
ApproximatePointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* a = new ApproximatePointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
a->_iData = tree._densifyInterpolationInfoAndSetDualConstraints< T , PointD >( samples , constraintDual , adaptiveExponent );
return a;
}
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximatePointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* InitializeApproximatePointAndDataInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , int adaptiveExponent )
{
ApproximatePointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* a = new ApproximatePointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
a->_iData = tree._densifyInterpolationInfoAndSetDualConstraints< T , Data , PointD >( samples , sampleData , constraintDual , adaptiveExponent );
return a;
}
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximateChildPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* InitializeApproximateChildPointInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
ApproximateChildPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* a = new ApproximateChildPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
a->_iData = tree._densifyChildInterpolationInfoAndSetDualConstraints< T , PointD >( samples , constraintDual , noRescale );
return a;
}
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ApproximateChildPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* InitializeApproximateChildPointAndDataInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
ApproximateChildPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* a = new ApproximateChildPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
a->_iData = tree._densifyChildInterpolationInfoAndSetDualConstraints< T , Data , PointD >( samples , sampleData , constraintDual , noRescale );
return a;
}
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ExactPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* InitializeExactPointInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
ExactPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >* e = new ExactPointInterpolationInfo< T , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
e->_init( tree , samples , noRescale );
return e;
}
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual >
static ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* InitializeExactPointAndDataInterpolationInfo( const class FEMTree< Dim , Real >& tree , const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , SystemDual systemDual , bool constrainsDCTerm , bool noRescale )
{
ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >* e = new ExactPointAndDataInterpolationInfo< T , Data , PointD , ConstraintDual , SystemDual >( constraintDual , systemDual , constrainsDCTerm );
e->_init( tree , samples , sampleData , noRescale );
return e;
}
template< typename T , unsigned int PointD , typename ConstraintDual , typename SystemDual > friend struct ExactPointInterpolationInfo;
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual , typename SystemDual > friend struct ExactPointAndDataInterpolationInfo;
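// A collection of interpolation infos constrains the DC term if any non-null info in the pack does:
// the variadic overload ORs the head against the rest, the single-pointer overload handles null
// pointers, and the zero-argument overload is the recursion's base case.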
template< typename T , unsigned int PointD , unsigned int ... PointDs >
static bool ConstrainsDCTerm( const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ){ return ConstrainsDCTerm( iInfo ) || ConstrainsDCTerm( iInfos... ); }
template< typename T , unsigned int PointD >
static bool ConstrainsDCTerm( const InterpolationInfo< T , PointD >* iInfo ){ return iInfo && iInfo->constrainsDCTerm(); }
static bool ConstrainsDCTerm( void ){ return false; }
#ifdef SHOW_WARNINGS
#pragma message( "[WARNING] This should not be isotropic" )
#endif // SHOW_WARNINGS
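// Per-node sample-density estimates, stored as coefficients of degree-DensityDegree B-splines.
// kernelDepth is the tree depth at which the density kernel is splatted; coDimension is
// (presumably) the codimension of the sampled set -- e.g. 1 for samples drawn from a
// hypersurface in Dim dimensions -- used when converting density to a sample-depth estimate.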
template< unsigned int DensityDegree > struct DensityEstimator : public SparseNodeData< Real , IsotropicUIntPack< Dim , FEMDegreeAndBType< DensityDegree >::Signature > >
{
DensityEstimator( int kernelDepth , int coDimension ) : _kernelDepth( kernelDepth ) , _coDimension( coDimension ){ ; }
int coDimension( void ) const { return _coDimension; }
int kernelDepth( void ) const { return _kernelDepth; }
protected:
int _kernelDepth , _coDimension;
};
protected:
bool _isValidSpaceNode( const FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::SPACE_FLAG ); }
bool _isValidFEM1Node ( const FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::FEM_FLAG_1 ); }
bool _isValidFEM2Node ( const FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::FEM_FLAG_2 ); }
bool _isRefinableNode ( const FEMTreeNode* node ) const { return !GetGhostFlag< Dim >( node ) && ( node->nodeData.flags & FEMTreeNodeData::REFINABLE_FLAG ); }
FEMTreeNode* _tree;
FEMTreeNode* _spaceRoot;
SortedTreeNodes< Dim > _sNodes;
LocalDepth _maxDepth;
int _depthOffset;
mutable unsigned int _femSigs1[ Dim ];
mutable unsigned int _femSigs2[ Dim ];
mutable unsigned int _refinableSigs[ Dim ];
static bool _InBounds( Point< Real , Dim > p );
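// Node depths/offsets are stored relative to the full (padded) tree; _depthOffset is the number
// of padding levels, so the conversions below map between user-facing "local" depths and actual
// node depths, and _localInset gives the offset (in cells, at a given local depth) of the local
// origin within the padded grid.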
int _localToGlobal( LocalDepth d ) const { return d + _depthOffset; }
LocalDepth _localDepth( const FEMTreeNode* node ) const { return node->depth() - _depthOffset; }
int _localInset( LocalDepth d ) const { return _depthOffset<=1 ? 0 : 1<<( d + _depthOffset - 1 ); }
void _localDepthAndOffset( const FEMTreeNode* node , LocalDepth& d , LocalOffset& off ) const
{
node->depthAndOffset( d , off ) ; d -= _depthOffset;
int inset = _localInset( d );
for( int dd=0 ; dd<Dim ; dd++ ) off[dd] -= inset;
}
template< unsigned int FEMSig > static int _BSplineBegin( LocalDepth depth ){ return BSplineEvaluationData< FEMSig >::Begin( depth ); }
template< unsigned int FEMSig > static int _BSplineEnd ( LocalDepth depth ){ return BSplineEvaluationData< FEMSig >::End ( depth ); }
template< unsigned int ... FEMSigs >
bool _outOfBounds( UIntPack< FEMSigs ... > , const FEMTreeNode* node ) const
{
if( !node ) return true;
LocalDepth d ; LocalOffset off ; _localDepthAndOffset( node , d , off );
return FEMIntegrator::IsOutOfBounds( UIntPack< FEMSigs ... >() , d , off );
}
int _sNodesBegin( LocalDepth d ) const { return _sNodes.begin( _localToGlobal( d ) ); }
int _sNodesEnd ( LocalDepth d ) const { return _sNodes.end ( _localToGlobal( d ) ); }
int _sNodesSize ( LocalDepth d ) const { return _sNodes.size ( _localToGlobal( d ) ); }
int _sNodesBeginSlice( LocalDepth d ) const { return _localInset(d); }
int _sNodesEndSlice( LocalDepth d ) const { return ( 1<<_localToGlobal(d) ) - _localInset(d) - 1; }
int _sNodesBegin( LocalDepth d , int slice ) const { return _sNodes.begin( _localToGlobal( d ) , slice + _localInset( d ) ); }
int _sNodesEnd ( LocalDepth d , int slice ) const { return _sNodes.end ( _localToGlobal( d ) , slice + _localInset( d ) ); }
int _sNodesSize ( LocalDepth d , int slice ) const { return _sNodes.size ( _localToGlobal( d ) , slice + _localInset( d ) ); }
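// A function is "interiorly supported" at (depth, offset) if the support of the associated
// B-spline lies strictly inside the domain, so that interior (boundary-free) stencils apply.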
template< unsigned int FEMDegree > static bool _IsInteriorlySupported( LocalDepth depth , const LocalOffset off )
{
if( depth>=0 )
{
int begin , end;
BSplineSupportSizes< FEMDegree >::InteriorSupportedSpan( depth , begin , end );
bool interior = true;
for( int dd=0 ; dd<Dim ; dd++ ) interior &= off[dd]>=begin && off[dd]<end;
return interior;
}
else return false;
}
template< unsigned int FEMDegree > bool _isInteriorlySupported( const FEMTreeNode* node ) const
{
if( !node ) return false;
LocalDepth d ; LocalOffset off;
_localDepthAndOffset( node , d , off );
return _IsInteriorlySupported< FEMDegree >( d , off );
}
template< unsigned int ... FEMDegrees > static bool _IsInteriorlySupported( UIntPack< FEMDegrees ... > , LocalDepth depth , const LocalOffset off ){ return BaseFEMIntegrator::IsInteriorlySupported( UIntPack< FEMDegrees ... >() , depth , off ); }
template< unsigned int ... FEMDegrees > bool _isInteriorlySupported( UIntPack< FEMDegrees ... > , const FEMTreeNode* node ) const
{
if( !node ) return false;
LocalDepth d ; LocalOffset off ; _localDepthAndOffset( node , d , off );
return _IsInteriorlySupported< FEMDegrees ... >( UIntPack< FEMDegrees ... >() , d , off );
}
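// Analogously, a node is "interiorly overlapped" if every pair of degree-FEMDegree1/FEMDegree2
// functions whose supports overlap it is interior, again allowing the precomputed stencils to be used.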
template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > static bool _IsInteriorlyOverlapped( LocalDepth depth , const LocalOffset off )
{
if( depth>=0 )
{
int begin , end;
BSplineIntegrationData< FEMDegreeAndBType< FEMDegree1 , BOUNDARY_NEUMANN >::Signature , FEMDegreeAndBType< FEMDegree2 , BOUNDARY_NEUMANN >::Signature >::InteriorOverlappedSpan( depth , begin , end );
bool interior = true;
for( int dd=0 ; dd<Dim ; dd++ ) interior &= off[dd]>=begin && off[dd]<end;
return interior;
}
else return false;
}
template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > bool _isInteriorlyOverlapped( const FEMTreeNode* node ) const
{
if( !node ) return false;
LocalDepth d ; LocalOffset off;
_localDepthAndOffset( node , d , off );
return _IsInteriorlyOverlapped< FEMDegree1 , FEMDegree2 >( d , off );
}
template< unsigned int ... FEMDegrees1 , unsigned int ... FEMDegrees2 > static bool _IsInteriorlyOverlapped( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , LocalDepth depth , const LocalOffset off ){ return BaseFEMIntegrator::IsInteriorlyOverlapped( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... >() , depth , off ); }
template< unsigned int ... FEMDegrees1 , unsigned int ... FEMDegrees2 > bool _isInteriorlyOverlapped( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , const FEMTreeNode* node ) const
{
if( !node ) return false;
LocalDepth d ; LocalOffset off ; _localDepthAndOffset( node , d , off );
return _IsInteriorlyOverlapped( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... >() , d , off );
}
void _startAndWidth( const FEMTreeNode* node , Point< Real , Dim >& start , Real& width ) const
{
LocalDepth d ; LocalOffset off;
_localDepthAndOffset( node , d , off );
if( d>=0 ) width = Real( 1.0 / (1<< d ) );
else width = Real( 1.0 * (1<<(-d)) );
for( int dd=0 ; dd<Dim ; dd++ ) start[dd] = Real( off[dd] ) * width;
}
void _centerAndWidth( const FEMTreeNode* node , Point< Real , Dim >& center , Real& width ) const
{
LocalDepth d ; LocalOffset off;
_localDepthAndOffset( node , d , off );
width = Real( 1.0 / (1<<d) );
for( int dd=0 ; dd<Dim ; dd++ ) center[dd] = Real( off[dd] + 0.5 ) * width;
}
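// The child containing p is encoded bitwise: bit d of the returned index is set exactly when p
// lies in the upper half of the node along dimension d (matching the (idx<<Dim)|child packing
// used by the brood-based interpolation infos above).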
int _childIndex( const FEMTreeNode* node , Point< Real , Dim > p ) const
{
Point< Real , Dim > c ; Real w;
_centerAndWidth( node , c , w );
int cIdx = 0;
for( int d=0 ; d<Dim ; d++ ) if( p[d]>=c[d] ) cIdx |= (1<<d);
return cIdx;
}
template< unsigned int ... Degrees > void _setFullDepth( UIntPack< Degrees ... > , FEMTreeNode* node , LocalDepth depth );
template< unsigned int ... Degrees > void _setFullDepth( UIntPack< Degrees ... > , LocalDepth depth );
template< unsigned int ... Degrees > LocalDepth _getFullDepth( UIntPack< Degrees ... > , const FEMTreeNode* node ) const;
public:
template< unsigned int ... Degrees > LocalDepth getFullDepth( UIntPack< Degrees ... > ) const;
LocalDepth depth( const FEMTreeNode* node ) const { return _localDepth( node ); }
void depthAndOffset( const FEMTreeNode* node , LocalDepth& depth , LocalOffset& offset ) const { _localDepthAndOffset( node , depth , offset ); }
int nodesSize ( void ) const { return _sNodes.size( ); }
int nodesBegin( LocalDepth d ) const { return _sNodes.begin( _localToGlobal( d ) ); }
int nodesEnd ( LocalDepth d ) const { return _sNodes.end ( _localToGlobal( d ) ); }
int nodesSize ( LocalDepth d ) const { return _sNodes.size ( _localToGlobal( d ) ); }
int nodesBegin( LocalDepth d , int slice ) const { return _sNodes.begin( _localToGlobal( d ) , slice + _localInset( d ) ); }
int nodesEnd ( LocalDepth d , int slice ) const { return _sNodes.end ( _localToGlobal( d ) , slice + _localInset( d ) ); }
int nodesSize ( LocalDepth d , int slice ) const { return _sNodes.size ( _localToGlobal( d ) , slice + _localInset( d ) ); }
const FEMTreeNode* node( int idx ) const { return _sNodes.treeNodes[idx]; }
void centerAndWidth( int idx , Point< Real , Dim >& center , Real& width ) const { _centerAndWidth( _sNodes.treeNodes[idx] , center , width ); }
void startAndWidth( int idx , Point< Real , Dim >& center , Real& width ) const { _startAndWidth( _sNodes.treeNodes[idx] , center , width ); }
protected:
/////////////////////////////////////
// System construction code //
// MultiGridFEMTreeData.System.inl //
/////////////////////////////////////
public:
template< unsigned int ... FEMSigs > void setMultiColorIndices( UIntPack< FEMSigs ... > , int depth , std::vector< std::vector< int > >& indices ) const;
protected:
template< unsigned int ... FEMSigs > void _setMultiColorIndices( UIntPack< FEMSigs ... > , int start , int end , std::vector< std::vector< int > >& indices ) const;
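// Solver statistics, presumably: bNorm2 is the squared norm of the constraint vector and
// inRNorm2/outRNorm2 the squared residual norms before/after relaxation; the timers record
// constraint-update, system-construction, and solve times.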
struct _SolverStats
{
double constraintUpdateTime , systemTime , solveTime;
double bNorm2 , inRNorm2 , outRNorm2;
};
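// The _addPointValues/_addProlongedPointValues/... helpers below unroll a heterogeneous pack of
// InterpolationInfo pointers at compile time: the enable_if'd variadic overload processes the
// first info and recurses on the tail, while the one-pointer overload does the actual work and
// the zero-pointer overload is a no-op terminating the recursion.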
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ... PointDs >
typename std::enable_if< (sizeof...(PointDs)!=0) >::type _addPointValues( UIntPack< FEMSigs ... > , StaticWindow< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const
{
_addPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , bsData , iInfo ) , _addPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , bsData , iInfos... );
}
template< unsigned int ... FEMSigs >
void _addPointValues( UIntPack< FEMSigs ... > , StaticWindow< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData ) const { }
template< unsigned int ... FEMSigs , typename T , unsigned int PointD >
void _addPointValues( UIntPack< FEMSigs ... > , StaticWindow< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* interpolationInfo ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ... PointDs >
typename std::enable_if< (sizeof...(PointDs)!=0) >::type _addProlongedPointValues( UIntPack< FEMSigs ... > , WindowSlice< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > > pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const
{
_addProlongedPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , pNeighbors , bsData , iInfo ) , _addProlongedPointValues( UIntPack< FEMSigs ... >() , pointValues , neighbors , pNeighbors , bsData , iInfos... );
}
template< unsigned int ... FEMSigs > void _addProlongedPointValues( UIntPack< FEMSigs ... > , WindowSlice< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > > pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData ) const { }
template< unsigned int ... FEMSigs , typename T , unsigned int PointD >
void _addProlongedPointValues( UIntPack< FEMSigs ... > , WindowSlice< Real , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > > pointValues , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ... PointDs >
typename std::enable_if< (sizeof...(PointDs)!=0) >::type _setPointValuesFromProlongedSolution( LocalDepth highDepth , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) prolongedSolution , InterpolationInfo< T , PointD >* iInfo , InterpolationInfo< T , PointDs >* ... iInfos ) const
{
_setPointValuesFromProlongedSolution( highDepth , bsData , prolongedSolution , iInfo ) , _setPointValuesFromProlongedSolution( highDepth , bsData , prolongedSolution , iInfos... );
}
template< unsigned int ... FEMSigs , typename T > void _setPointValuesFromProlongedSolution( LocalDepth highDepth , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) prolongedSolution ) const { }
template< unsigned int ... FEMSigs , typename T , unsigned int PointD >
void _setPointValuesFromProlongedSolution( LocalDepth highDepth , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) prolongedSolution , InterpolationInfo< T , PointD >* interpolationInfo ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ... PointDs >
typename std::enable_if< (sizeof...(PointDs)!=0) , T >::type _getInterpolationConstraintFromProlongedSolution( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const
{
return _getInterpolationConstraintFromProlongedSolution( neighbors , node , prolongedSolution , bsData , iInfo ) + _getInterpolationConstraintFromProlongedSolution( neighbors , node , prolongedSolution , bsData , iInfos... );
}
template< unsigned int ... FEMSigs , typename T > T _getInterpolationConstraintFromProlongedSolution( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData ) const { return T(); }
template< unsigned int ... FEMSigs , typename T , unsigned int PointD >
T _getInterpolationConstraintFromProlongedSolution( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointD >* iInfo ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int PointD , unsigned int ... PointDs >
typename std::enable_if< (sizeof...(PointDs)!=0) >::type _updateRestrictedInterpolationConstraints( const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const
{
_updateRestrictedInterpolationConstraints( bsData , highDepth , solution , cumulativeConstraints , iInfo ) , _updateRestrictedInterpolationConstraints( bsData , highDepth , solution , cumulativeConstraints , iInfos... );
}
template< unsigned int ... FEMSigs , typename T > void _updateRestrictedInterpolationConstraints( const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints ) const { ; }
template< unsigned int ... FEMSigs , typename T , unsigned int PointD >
void _updateRestrictedInterpolationConstraints( const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints , const InterpolationInfo< T , PointD >* interpolationInfo ) const;
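// Computes the bounds, within the overlap window, of the parent-level functions whose supports
// overlap the given node (equivalently, the child with index cIdx).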
template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > static void _SetParentOverlapBounds( const FEMTreeNode* node , int start[Dim] , int end[Dim] );
template< unsigned int FEMDegree1 , unsigned int FEMDegree2 > static void _SetParentOverlapBounds( int cIdx , int start[Dim] , int end[Dim] );
template< unsigned int ... FEMDegrees1 , unsigned int ... FEMDegrees2 > static void _SetParentOverlapBounds( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , const FEMTreeNode* node , int start[Dim] , int end[Dim] )
{
if( node )
{
int d , off[Dim] ; node->depthAndOffset( d , off );
BaseFEMIntegrator::template ParentOverlapBounds( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... >() , d , off , start , end );
}
}
template< unsigned int ... FEMDegrees1 , unsigned int ... FEMDegrees2 > static void _SetParentOverlapBounds( UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , int cIdx , int start[Dim] , int end[Dim] )
{
BaseFEMIntegrator::template ParentOverlapBounds( UIntPack< FEMDegrees1 ... >() , UIntPack< FEMDegrees2 ... >() , cIdx , start , end );
}
template< unsigned int ... FEMSigs >
int _getProlongedMatrixRowSize( const FEMTreeNode* node , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors ) const;
#if defined( __GNUC__ ) && __GNUC__ < 5
#warning "you've got me gcc version<5"
template< unsigned int ... FEMSigs >
int _getMatrixRowSize( UIntPack< FEMSigs ... > , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors ) const;
#else // !__GNUC__ || __GNUC__ >=5
template< unsigned int ... FEMSigs >
int _getMatrixRowSize( const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors ) const;
#endif // defined( __GNUC__ ) && __GNUC__ < 5
template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs >
T _setMatrixRowAndGetConstraintFromProlongation( UIntPack< FEMSigs ... > , const BaseSystem< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , Pointer( MatrixEntry< Real > ) row , int offset , const PCStencils< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& pcStencils , const CCStencil< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& ccStencil , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) prolongedSolution , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs >
int _setProlongedMatrixRow( const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , Pointer( MatrixEntry< Real > ) row , int offset , const DynamicWindow< double , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& stencil , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Updates the constraints @(depth) based on the solution coefficients @(depth-1)
template< unsigned int ... FEMSigs , typename T , unsigned int ... PointDs >
T _getConstraintFromProlongedSolution( UIntPack< FEMSigs ... > , const BaseSystem< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& neighbors , const typename FEMTreeNode::template ConstNeighbors< UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& pNeighbors , const FEMTreeNode* node , ConstPointer( T ) prolongedSolution , const DynamicWindow< double , UIntPack< BSplineOverlapSizes< FEMSignature< FEMSigs >::Degree >::OverlapSize ... > >& stencil , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
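// Gauss-Seidel relaxation at a fixed depth: the "sliced" variant sweeps the depth one slice of
// nodes at a time (presumably to bound the working set and expose parallelism across slices),
// while the "full" variant relaxes over all nodes at the depth; coarseToFine determines whether
// the prolonged coarser solution contributes to the constraints.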
template< unsigned int ... FEMSigs , typename T , typename TDotT , typename SORWeights , unsigned int ... PointDs >
int _solveFullSystemGS( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , SORWeights sorWeights , _SolverStats& stats , bool computeNorms , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
template< unsigned int ... FEMSigs , typename T , typename TDotT , typename SORWeights , unsigned int ... PointDs >
int _solveSlicedSystemGS( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , unsigned int sliceBlockSize , SORWeights sorWeights , _SolverStats& stats , bool computeNorms , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
template< unsigned int ... FEMSigs , typename T , typename TDotT , typename SORWeights , unsigned int ... PointDs >
int _solveSystemGS( UIntPack< FEMSigs ... > , bool sliced , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , unsigned int sliceBlockSize , SORWeights sorWeights , _SolverStats& stats , bool computeNorms , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const
{
if( sliced ) return _solveSlicedSystemGS( UIntPack< FEMSigs ... >() , F , bsData , depth , solution , prolongedSolution , constraints , Dot , iters , coarseToFine , sliceBlockSize , sorWeights , stats , computeNorms , interpolationInfo ... );
else return _solveFullSystemGS ( UIntPack< FEMSigs ... >() , F , bsData , depth , solution , prolongedSolution , constraints , Dot , iters , coarseToFine , sorWeights , stats , computeNorms , interpolationInfo ... );
}
template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs >
int _solveSystemCG( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) prolongedSolution , ConstPointer( T ) constraints , TDotT Dot , int iters , bool coarseToFine , _SolverStats& stats , bool computeNorms , double cgAccuracy , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs >
void _solveRegularMG( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , Pointer( T ) solution , ConstPointer( T ) constraints , TDotT Dot , int vCycles , int iters , _SolverStats& stats , bool computeNorms , double cgAccuracy , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Updates the cumulative integral constraints @(depth-1) based on the change in solution coefficients @(depth)
template< unsigned int ... FEMSigs , typename T >
void _updateRestrictedIntegralConstraints( UIntPack< FEMSigs ... > , const typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth highDepth , ConstPointer( T ) solution , Pointer( T ) cumulativeConstraints ) const;
template< unsigned int PointD , typename T , unsigned int ... FEMSigs >
CumulativeDerivativeValues< T , Dim , PointD > _coarserFunctionValues( UIntPack< FEMSigs ... > , Point< Real , Dim > p , const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) coefficients ) const;
template< unsigned int PointD , typename T , unsigned int ... FEMSigs >
CumulativeDerivativeValues< T , Dim , PointD > _finerFunctionValues( UIntPack< FEMSigs ... > , Point< Real , Dim > p , const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , ConstPointer( T ) coefficients ) const;
template< unsigned int ... FEMSigs , typename T , unsigned int ... PointDs >
int _getSliceMatrixAndProlongationConstraints( UIntPack< FEMSigs ... > , const BaseSystem< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , SparseMatrix< Real >& matrix , Pointer( Real ) diagonalR , const PointEvaluator< UIntPack< FEMSigs ... > , UIntPack< FEMSignature< FEMSigs >::Degree ... > >& bsData , LocalDepth depth , int nBegin , int nEnd , ConstPointer( T ) prolongedSolution , Pointer( T ) constraints , const CCStencil < UIntPack< FEMSignature< FEMSigs >::Degree ... > >& ccStencil , const PCStencils< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& pcStencils , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
// Down samples constraints @(depth) to constraints @(depth-1)
template< class C , unsigned ... Degrees , unsigned int ... FEMSigs > void _downSample( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< Degrees ... > >& RP , LocalDepth highDepth , Pointer( C ) constraints ) const;
// Up samples coefficients @(depth-1) to coefficients @(depth)
template< class C , unsigned ... Degrees , unsigned int ... FEMSigs > void _upSample( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template RestrictionProlongation< UIntPack< Degrees ... > >& RP , LocalDepth highDepth , Pointer( C ) coefficients ) const;
template< bool XMajor , class C , unsigned int ... FEMSigs > static void _RegularGridUpSample( UIntPack< FEMSigs ... > , LocalDepth highDepth , ConstPointer( C ) lowCoefficients , Pointer( C ) highCoefficients );
template< bool XMajor , class C , unsigned int ... FEMSigs > static void _RegularGridUpSample( UIntPack< FEMSigs ... > , const int lowBegin[] , const int lowEnd[] , const int highBegin[] , const int highEnd[] , LocalDepth highDepth , ConstPointer( C ) lowCoefficients , Pointer( C ) highCoefficients );
public:
template< class C , unsigned int ... FEMSigs > DenseNodeData< C , UIntPack< FEMSigs ... > > coarseCoefficients( const DenseNodeData< C , UIntPack< FEMSigs ... > >& coefficients ) const;
template< class C , unsigned int ... FEMSigs > DenseNodeData< C , UIntPack< FEMSigs ... > > coarseCoefficients( const SparseNodeData< C , UIntPack< FEMSigs ... > >& coefficients ) const;
// For each (valid) fem node, compute the ratio of the sum of active prolongation weights to the sum of total prolongation weights
// If the prolongToChildren flag is set, then these weights are pushed to the children by computing the ratio of the prolongation of the above weights to the prolongation of unity weights
template< unsigned int ... FEMSigs > DenseNodeData< Real , UIntPack< FEMSigs ... > > prolongationWeights( UIntPack< FEMSigs ... > , bool prolongToChildren ) const;
// For each (valid) fem node, compute the ratio of the integral of the basis function over the valid space nodes to the integral of the basis function over all of space
template< unsigned int ... FEMSigs > DenseNodeData< Real , UIntPack< FEMSigs ... > > supportWeights( UIntPack< FEMSigs ... > ) const;
protected:
//////////////////////////////////////////////
// Code for splatting point-sample data //
// MultiGridFEMTreeData.WeightedSamples.inl //
//////////////////////////////////////////////
template< unsigned int WeightDegree >
void _addWeightContribution( DensityEstimator< WeightDegree >& densityWeights , FEMTreeNode* node , Point< Real , Dim > position , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , Real weight=Real(1.0) );
template< unsigned int WeightDegree , class PointSupportKey >
Real _getSamplesPerNode( const DensityEstimator< WeightDegree >& densityWeights , const FEMTreeNode* node , Point< Real , Dim > position , PointSupportKey& weightKey ) const;
template< unsigned int WeightDegree , class WeightKey >
void _getSampleDepthAndWeight( const DensityEstimator< WeightDegree >& densityWeights , const FEMTreeNode* node , Point< Real , Dim > position , WeightKey& weightKey , Real& depth , Real& weight ) const;
template< unsigned int WeightDegree , class WeightKey >
void _getSampleDepthAndWeight( const DensityEstimator< WeightDegree >& densityWeights , Point< Real , Dim > position , WeightKey& weightKey , Real& depth , Real& weight ) const;
template< bool CreateNodes , class V , unsigned int ... DataSigs > void _splatPointData( FEMTreeNode* node , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey );
template< bool CreateNodes , unsigned int WeightDegree , class V , unsigned int ... DataSigs > Real _splatPointData( const DensityEstimator< WeightDegree >& densityWeights , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , PointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey , LocalDepth minDepth , LocalDepth maxDepth , int dim , Real depthBias );
template< bool CreateNodes , unsigned int WeightDegree , class V , unsigned int ... DataSigs > Real _multiSplatPointData( const DensityEstimator< WeightDegree >* densityWeights , FEMTreeNode* node , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , PointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey , int dim );
template< unsigned int WeightDegree , class V , unsigned int ... DataSigs > Real _nearestMultiSplatPointData( const DensityEstimator< WeightDegree >* densityWeights , FEMTreeNode* node , Point< Real , Dim > point , V v , SparseNodeData< V , UIntPack< DataSigs ... > >& data , PointSupportKey< IsotropicUIntPack< Dim , WeightDegree > >& weightKey , int dim=Dim );
template< class V , class Coefficients , unsigned int D , unsigned int ... DataSigs > V _evaluate( const Coefficients& coefficients , Point< Real , Dim > p , const PointEvaluator< UIntPack< DataSigs ... > , IsotropicUIntPack< Dim , D > >& pointEvaluator , const ConstPointSupportKey< UIntPack< FEMSignature< DataSigs >::Degree ... > >& dataKey ) const;
public:
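// Evaluates the coefficients on a regular grid at the prescribed depth, returning the grid and
// its resolution through res. Presumably "primal" samples at grid corners (res = (1<<depth)+1
// per axis) while the default, dual, sampling is at cell centers (res = 1<<depth); XMajor
// selects the memory ordering of the returned grid.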
template< bool XMajor , class V , unsigned int ... DataSigs > Pointer( V ) regularGridEvaluate( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , int& res , LocalDepth depth=-1 , bool primal=false ) const;
template< bool XMajor , class V , unsigned int ... DataSigs > Pointer2( V ) regularGridEvaluate_2darray( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , int& res , LocalDepth depth=-1 , bool primal=false ) const;
template< bool XMajor , class V , unsigned int ... DataSigs > Pointer( V ) regularGridUpSample( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , LocalDepth depth=-1 ) const;
template< bool XMajor , class V , unsigned int ... DataSigs > Pointer( V ) regularGridUpSample( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , const int begin[Dim] , const int end[Dim] , LocalDepth depth=-1 ) const;
template< class V , unsigned int ... DataSigs > V average( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients ) const;
template< class V , unsigned int ... DataSigs > V average( const DenseNodeData< V , UIntPack< DataSigs ... > >& coefficients , const Real begin[Dim] , const Real end[Dim] ) const;
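// Functors (e.g. for _clipTree below) deciding which subtrees carry data: HasNormalDataFunctor
// reports whether the node or any descendant stores a nonzero normal; TrivialHasDataFunctor
// keeps everything.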
template< typename T > struct HasNormalDataFunctor{};
template< unsigned int ... NormalSigs >
struct HasNormalDataFunctor< UIntPack< NormalSigs ... > >
{
const SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ... > >& normalInfo;
HasNormalDataFunctor( const SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ... > >& ni ) : normalInfo( ni ){ ; }
bool operator() ( const FEMTreeNode* node ) const
{
const Point< Real , Dim >* n = normalInfo( node );
if( n )
{
const Point< Real , Dim >& normal = *n;
for( int d=0 ; d<Dim ; d++ ) if( normal[d]!=0 ) return true;
}
if( node->children ) for( int c=0 ; c<(1<<Dim) ; c++ ) if( (*this)( node->children + c ) ) return true;
return false;
}
};
struct TrivialHasDataFunctor{ bool operator() ( const FEMTreeNode* node ) const { return true; } };
protected:
// [NOTE] The input/output for this method is pre-scaled by weight
template< typename T > bool _setInterpolationInfoFromChildren( FEMTreeNode* node , SparseNodeData< T , IsotropicUIntPack< Dim , FEMTrivialSignature > >& iInfo ) const;
template< typename T , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointInfo < Dim , Real , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstraintDual constraintDual , int adaptiveExponent ) const;
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointAndDataInfo< Dim , Real , Data , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , int adaptiveExponent ) const;
template< typename T , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointInfoBrood < Dim , Real , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyChildInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstraintDual constraintDual , bool noRescale ) const;
template< typename T , typename Data , unsigned int PointD , typename ConstraintDual > SparseNodeData< DualPointAndDataInfoBrood< Dim , Real , Data , T , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > _densifyChildInterpolationInfoAndSetDualConstraints( const std::vector< PointSample >& samples , ConstPointer( Data ) sampleData , ConstraintDual constraintDual , bool noRescale ) const;
void _setSpaceValidityFlags( void ) const;
template< unsigned int ... FEMSigs1 > void _setFEM1ValidityFlags( UIntPack< FEMSigs1 ... > ) const;
template< unsigned int ... FEMSigs2 > void _setFEM2ValidityFlags( UIntPack< FEMSigs2 ... > ) const;
template< unsigned int ... FEMSigs > void _setRefinabilityFlags( UIntPack< FEMSigs ... > ) const;
template< class HasDataFunctor > void _clipTree( const HasDataFunctor& f , LocalDepth fullDepth );
public:
template< unsigned int PointD , unsigned int ... FEMSigs > SparseNodeData< CumulativeDerivativeValues< Real , Dim , PointD > , IsotropicUIntPack< Dim , FEMTrivialSignature > > leafValues( const DenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients , int maxDepth=-1 ) const;
protected:
/////////////////////////////////////
// Evaluation Methods //
// MultiGridFEMTreeData.Evaluation //
/////////////////////////////////////
static const unsigned int CHILDREN = 1<<Dim;
template< typename Pack , unsigned int PointD > struct _Evaluator{ };
template< unsigned int ... FEMSigs , unsigned int PointD >
struct _Evaluator< UIntPack< FEMSigs ... > , PointD >
{
static_assert( Dim == sizeof...(FEMSigs) , "[ERROR] Number of signatures doesn't match dimension" );
typedef DynamicWindow< CumulativeDerivativeValues< double , Dim , PointD > , UIntPack< BSplineSupportSizes< FEMSignature< FEMSigs >::Degree >::SupportSize ... > > CenterStencil;
typedef DynamicWindow< CumulativeDerivativeValues< double , Dim , PointD > , UIntPack< BSplineSupportSizes< FEMSignature< FEMSigs >::Degree >::SupportSize ... > > CornerStencil;
typedef DynamicWindow< CumulativeDerivativeValues< double , Dim , PointD > , UIntPack< ( BSplineSupportSizes< FEMSignature< FEMSigs >::Degree >::BCornerSize + 1 ) ... > > BCornerStencil;
typedef std::tuple< typename BSplineEvaluationData< FEMSigs >::template Evaluator< PointD > ... > Evaluators;
typedef std::tuple< typename BSplineEvaluationData< FEMSigs >::template ChildEvaluator< PointD > ... > ChildEvaluators;
struct StencilData
{
CenterStencil ccCenterStencil , pcCenterStencils[CHILDREN];
CornerStencil ccCornerStencil[CHILDREN] , pcCornerStencils[CHILDREN][CHILDREN];
BCornerStencil ccBCornerStencil[CHILDREN] , pcBCornerStencils[CHILDREN][CHILDREN];
};
Pointer( StencilData ) stencilData;
Pointer( Evaluators ) evaluators;
Pointer( ChildEvaluators ) childEvaluators;
void set( LocalDepth depth );
_Evaluator( void ){ _pointEvaluator = NULL ; stencilData = NullPointer( StencilData ) , evaluators = NullPointer( Evaluators ) , childEvaluators = NullPointer( ChildEvaluators ); }
~_Evaluator( void ){ if( _pointEvaluator ) delete _pointEvaluator , _pointEvaluator = NULL ; if( stencilData ) DeletePointer( stencilData ) ; if( evaluators ) DeletePointer( evaluators ) ; if( childEvaluators ) DeletePointer( childEvaluators ); }
protected:
enum _CenterOffset{ CENTER=-1 , BACK=0 , FRONT=1 };
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< double , Dim , _PointD > _values( unsigned int d , const int fIdx[Dim] , const int idx[Dim] , const _CenterOffset off[Dim] , bool parentChild ) const;
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< double , Dim , _PointD > _centerValues( unsigned int d , const int fIdx[Dim] , const int idx[Dim] , bool parentChild ) const;
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< double , Dim , _PointD > _cornerValues( unsigned int d , const int fIdx[Dim] , const int idx[Dim] , int corner , bool parentChild ) const;
template< unsigned int _PointD=PointD , unsigned int I=0 > typename std::enable_if< I==Dim >::type _setDValues( unsigned int d , const int fIdx[] , const int cIdx[] , const _CenterOffset off[] , bool pc , double dValues[][_PointD+1] ) const{ }
template< unsigned int _PointD=PointD , unsigned int I=0 > typename std::enable_if< I< Dim >::type _setDValues( unsigned int d , const int fIdx[] , const int cIdx[] , const _CenterOffset off[] , bool pc , double dValues[][_PointD+1] ) const
{
if( pc ) for( int dd=0 ; dd<=_PointD ; dd++ ) dValues[I][dd] = off[I]==CENTER ? std::get< I >( childEvaluators[d] ).centerValue( fIdx[I] , cIdx[I] , dd ) : std::get< I >( childEvaluators[d] ).cornerValue( fIdx[I] , cIdx[I]+off[I] , dd );
else for( int dd=0 ; dd<=_PointD ; dd++ ) dValues[I][dd] = off[I]==CENTER ? std::get< I >( evaluators[d] ).centerValue( fIdx[I] , cIdx[I] , dd ) : std::get< I >( evaluators[d] ).cornerValue( fIdx[I] , cIdx[I]+off[I] , dd );
_setDValues< _PointD , I+1 >( d , fIdx , cIdx , off , pc , dValues );
}
template< unsigned int I=0 > typename std::enable_if< I==Dim >::type _setEvaluators( unsigned int maxDepth ){ }
template< unsigned int I=0 > typename std::enable_if< I< Dim >::type _setEvaluators( unsigned int maxDepth )
{
static const unsigned int FEMSig = UIntPack< FEMSigs ... >::template Get< I >();
for( unsigned int d=0 ; d<=maxDepth ; d++ ) BSplineEvaluationData< FEMSig >:: SetEvaluator( std::template get< I >( evaluators[d] ) , d );
for( unsigned int d=1 ; d<=maxDepth ; d++ ) BSplineEvaluationData< FEMSig >::SetChildEvaluator( std::template get< I >( childEvaluators[d] ) , d-1 );
_setEvaluators< I+1 >( maxDepth );
}
typename FEMIntegrator::template PointEvaluator< UIntPack< FEMSigs ... > , IsotropicUIntPack< Dim , PointD > >* _pointEvaluator;
friend FEMTree;
};
template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD >
CumulativeDerivativeValues< V , Dim , _PointD > _getCenterValues( const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ... > , PointD >& evaluator , int maxDepth , bool isInterior ) const;
template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD >
CumulativeDerivativeValues< V , Dim , _PointD > _getCornerValues( const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , int corner , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ... > , PointD >& evaluator , int maxDepth , bool isInterior ) const;
template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD >
CumulativeDerivativeValues< V , Dim , _PointD > _getValues ( const ConstPointSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , Point< Real , Dim > p , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ... > , PointD >& evaluator , int maxDepth ) const;
template< class V , unsigned int _PointD , unsigned int ... FEMSigs , unsigned int PointD >
CumulativeDerivativeValues< V , Dim , _PointD > _getCornerValues( const ConstCornerSupportKey< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& neighborKey , const FEMTreeNode* node , int corner , ConstPointer( V ) solution , ConstPointer( V ) coarseSolution , const _Evaluator< UIntPack< FEMSigs ... > , PointD >& evaluator , int maxDepth , bool isInterior ) const;
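// Precomputed index tables for corner evaluation: for each corner c, ccIndices[c] lists the
// window entries of same-level (child-child) neighbors supported on that corner, and
// pcIndices[c][_c] the corresponding parent-child entries; the tables are filled once by
// running WindowLoop over the restricted index ranges.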
template< unsigned int ... SupportSizes >
struct CornerLoopData
{
typedef UIntPack< SupportSizes ... > _SupportSizes;
// static const unsigned int supportSizes[] = { SupportSizes ... };
static const unsigned int supportSizes[];
unsigned int ccSize[1<<Dim] , pcSize[1<<Dim][1<<Dim];
unsigned int ccIndices[1<<Dim] [ WindowSize< _SupportSizes >::Size ];
unsigned int pcIndices[1<<Dim][1<<Dim][ WindowSize< _SupportSizes >::Size ];
CornerLoopData( void )
{
int start[Dim] , end[Dim] , _start[Dim] , _end[Dim];
for( int c=0 ; c<(1<<Dim) ; c++ )
{
ccSize[c] = 0;
for( int dd=0 ; dd<Dim ; dd++ )
{
start[dd] = 0 , end[dd] = supportSizes[dd];
if( (c>>dd) & 1 ) start[dd]++;
else end [dd]--;
}
unsigned int idx[Dim];
WindowLoop< Dim >::Run
(
start , end ,
[&]( int d , int i ){ idx[d] = i; } ,
[&]( void ){ ccIndices[c][ ccSize[c]++ ] = GetWindowIndex( _SupportSizes() , idx ); }
);
for( int _c=0 ; _c<(1<<Dim) ; _c++ )
{
pcSize[c][_c] = 0;
for( int dd=0 ; dd<Dim ; dd++ )
{
if( ( (_c>>dd) & 1 ) != ( (c>>dd) & 1 ) ) _start[dd] = 0 , _end[dd] = supportSizes[dd];
else _start[dd] = start[dd] , _end[dd] = end[dd];
}
unsigned int idx[Dim];
WindowLoop< Dim >::Run
(
_start , _end ,
[&]( int d , int i ){ idx[d] = i; } ,
[&]( void ){ pcIndices[c][_c][ pcSize[c][_c]++ ] = GetWindowIndex( _SupportSizes() , idx ); }
);
}
}
}
};
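/* CornerLoopData precomputes, for each of the 2^Dim corners c of a node, the
   flattened support-window indices visited when evaluating at that corner
   (ccIndices/ccSize), and the analogous index lists relative to each child
   corner _c (pcIndices/pcSize), so the hot loops can iterate over plain
   arrays instead of re-running WindowLoop. The flattening performed by
   GetWindowIndex is ordinary row-major indexing; a minimal sketch for Dim==2
   with window sizes {S0,S1} (names illustrative):

     unsigned int flatten2d( unsigned int i0 , unsigned int i1 , unsigned int S1 )
     {
         return i0*S1 + i1;  // row-major: the last dimension varies fastest
     }
*/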
public:
template< typename Pack , unsigned int PointD , typename T > struct _MultiThreadedEvaluator{ };
template< unsigned int ... FEMSigs , unsigned int PointD , typename T >
struct _MultiThreadedEvaluator< UIntPack< FEMSigs ... > , PointD , T >
{
typedef UIntPack< FEMSigs ... > FEMSignatures;
typedef UIntPack< FEMSignature< FEMSigs >::Degree ... > FEMDegrees;
const FEMTree* _tree;
int _threads;
std::vector< ConstPointSupportKey< FEMDegrees > > _pointNeighborKeys;
std::vector< ConstCornerSupportKey< FEMDegrees > > _cornerNeighborKeys;
_Evaluator< FEMSignatures , PointD > _evaluator;
const DenseNodeData< T , FEMSignatures >& _coefficients;
DenseNodeData< T , FEMSignatures > _coarseCoefficients;
public:
_MultiThreadedEvaluator( const FEMTree* tree , const DenseNodeData< T , FEMSignatures >& coefficients , int threads=omp_get_max_threads() );
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< T , Dim , _PointD > values( Point< Real , Dim > p , int thread=0 , const FEMTreeNode* node=NULL );
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< T , Dim , _PointD > centerValues( const FEMTreeNode* node , int thread=0 );
template< unsigned int _PointD=PointD > CumulativeDerivativeValues< T , Dim , _PointD > cornerValues( const FEMTreeNode* node , int corner , int thread=0 );
};
template< typename Pack , unsigned int PointD , typename T=Real > using MultiThreadedEvaluator = _MultiThreadedEvaluator< Pack , PointD , T >;
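/* Usage sketch for MultiThreadedEvaluator (hedged): the per-thread neighbor
   keys make concurrent evaluation safe as long as each thread passes its own
   thread index. `tree`, `solution` (a solved coefficient vector), `Sigs`,
   `points`, and `values` are assumed to exist in the caller, and
   CumulativeDerivativeValues is assumed indexable, with [0] the plain value:

     typename FEMTree< Dim , Real >::template MultiThreadedEvaluator< Sigs , 0 > evaluator( &tree , solution );
     #pragma omp parallel for
     for( int i=0 ; i<(int)points.size() ; i++ )
         values[i] = evaluator.values( points[i] , omp_get_thread_num() )[0];
*/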
template< unsigned int DensityDegree >
struct MultiThreadedWeightEvaluator
{
const FEMTree* _tree;
int _threads;
std::vector< ConstPointSupportKey< IsotropicUIntPack< Dim , DensityDegree > > > _neighborKeys;
const DensityEstimator< DensityDegree >& _density;
public:
MultiThreadedWeightEvaluator( const FEMTree* tree , const DensityEstimator< DensityDegree >& density , int threads=omp_get_max_threads() );
Real weight( Point< Real , Dim > p , int thread=0 );
};
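/* The weight evaluator follows the same per-thread pattern (sketch; `density`
   is assumed to come from an earlier setDensityEstimator call):

     typename FEMTree< Dim , Real >::template MultiThreadedWeightEvaluator< DensityDegree > wEvaluator( &tree , *density );
     #pragma omp parallel for
     for( int i=0 ; i<(int)points.size() ; i++ )
         weights[i] = wEvaluator.weight( points[i] , omp_get_thread_num() );
*/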
static double _MaxMemoryUsage , _LocalMemoryUsage;
void _reorderDenseOrSparseNodeData( const int* , size_t ){ ; }
template< class Data , unsigned int ... FEMSigs , class ... DenseOrSparseNodeData >
void _reorderDenseOrSparseNodeData( const int* map , size_t sz , SparseNodeData< Data , UIntPack< FEMSigs ... > >* sData , DenseOrSparseNodeData* ... data )
{
if( sData ) sData->_remapIndices( map , (int)sz );
_reorderDenseOrSparseNodeData( map , sz , data ... );
}
template< class Data , unsigned int ... FEMSigs , class ... DenseOrSparseNodeData >
void _reorderDenseOrSparseNodeData( const int* map , size_t sz , DenseNodeData< Data , UIntPack< FEMSigs ... > >* dData , DenseOrSparseNodeData* ... data )
{
if( dData ) dData->_remapIndices( map , sz );
_reorderDenseOrSparseNodeData( map , sz , data ... );
}
public:
static double MaxMemoryUsage( void ){ return _MaxMemoryUsage; }
static double LocalMemoryUsage( void ){ return _LocalMemoryUsage; }
static void ResetLocalMemoryUsage( void ){ _LocalMemoryUsage = 0; }
static double MemoryUsage( void );
FEMTree( int blockSize );
FEMTree( FILE* fp , int blockSize );
~FEMTree( void )
{
if( _tree ) for( int c=0 ; c<(1<<Dim) ; c++ ) _tree[c].cleanChildren( nodeAllocator );
if( nodeAllocator ) delete nodeAllocator;
}
void write( FILE* fp ) const;
static void WriteParameter( FILE* fp )
{
FEMTreeRealType realType;
if ( typeid( Real )==typeid( float ) ) realType=FEM_TREE_REAL_FLOAT;
else if( typeid( Real )==typeid( double ) ) realType=FEM_TREE_REAL_DOUBLE;
else ERROR_OUT( "Unrecognized real type" );
fwrite( &realType , sizeof(FEMTreeRealType) , 1 , fp );
int dim = Dim;
fwrite( &dim , sizeof(int) , 1 , fp );
}
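/* WriteParameter serializes a FEMTreeRealType tag followed by the dimension,
   so a matching reader (a hypothetical helper, not part of this header)
   would mirror it exactly:

     static void ReadParameter( FILE* fp , FEMTreeRealType& realType , int& dim )
     {
         if( fread( &realType , sizeof(FEMTreeRealType) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read real type" );
         if( fread( &dim , sizeof(int) , 1 , fp )!=1 ) ERROR_OUT( "Failed to read dimension" );
     }
*/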
template< unsigned int LeftRadius , unsigned int RightRadius , class ... DenseOrSparseNodeData > void thicken( FEMTreeNode** nodes , size_t nodeCount , DenseOrSparseNodeData* ... data );
template< unsigned int LeftRadius , unsigned int RightRadius , class IsThickenNode , class ... DenseOrSparseNodeData > void thicken( IsThickenNode F , DenseOrSparseNodeData* ... data );
template< unsigned int Radius , class ... DenseOrSparseNodeData > void thicken( FEMTreeNode** nodes , size_t nodeCount , DenseOrSparseNodeData* ... data ){ thicken< Radius , Radius >( nodes , nodeCount , data ... ); }
template< unsigned int Radius , class IsThickenNode , class ... DenseOrSparseNodeData > void thicken( IsThickenNode F , DenseOrSparseNodeData* ... data ){ thicken< Radius , Radius >( F , data ... ); }
template< unsigned int DensityDegree >
typename FEMTree::template DensityEstimator< DensityDegree >* setDensityEstimator( const std::vector< PointSample >& samples , LocalDepth splatDepth , Real samplesPerNode , int coDimension );
template< unsigned int ... NormalSigs , unsigned int DensityDegree , class Data >
#if defined(_WIN32) || defined(_WIN64)
SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ... > > setNormalField( UIntPack< NormalSigs ... > , const std::vector< PointSample >& samples , const std::vector< Data >& normalData , const DensityEstimator< DensityDegree >* density , Real& pointWeightSum , std::function< Real ( Real ) > BiasFunction = []( Real ){ return 0.f; } );
#else // !_WIN32 && !_WIN64
SparseNodeData< Point< Real , Dim > , UIntPack< NormalSigs ... > > setNormalField( UIntPack< NormalSigs ... > , const std::vector< PointSample >& samples , const std::vector< Data >& normalData , const DensityEstimator< DensityDegree >* density , Real& pointWeightSum , std::function< Real ( Real ) > BiasFunction = []( Real ){ return (Real)0; } );
#endif // _WIN32 || _WIN64
template< unsigned int DataSig , bool CreateNodes , unsigned int DensityDegree , class Data >
SparseNodeData< Data , IsotropicUIntPack< Dim , DataSig > > setSingleDepthDataField( const std::vector< PointSample >& samples , const std::vector< Data >& sampleData , const DensityEstimator< DensityDegree >* density );
template< unsigned int DataSig , bool CreateNodes , unsigned int DensityDegree , class Data >
SparseNodeData< ProjectiveData< Data , Real > , IsotropicUIntPack< Dim , DataSig > > setDataField( const std::vector< PointSample >& samples , std::vector< Data >& sampleData , const DensityEstimator< DensityDegree >* density , bool nearest=false );
template< unsigned int MaxDegree , class HasDataFunctor , class ... DenseOrSparseNodeData > void finalizeForMultigrid( LocalDepth fullDepth , const HasDataFunctor F , DenseOrSparseNodeData* ... data );
template< unsigned int ... FEMSigs > DenseNodeData< Real , UIntPack< FEMSigs ... > > initDenseNodeData( UIntPack< FEMSigs ... > ) const;
template< class Data , unsigned int ... FEMSigs > DenseNodeData< Data , UIntPack< FEMSigs ... > > initDenseNodeData( UIntPack< FEMSigs ... > ) const;
// Add multiple-dimensions -> one-dimension constraints
template< typename T , unsigned int ... FEMDegrees , unsigned int ... FEMSigs , unsigned int ... CDegrees , unsigned int ... CSigs , unsigned int CDim >
void addFEMConstraints( typename BaseFEMIntegrator::template Constraint< UIntPack< FEMDegrees ... > , UIntPack< CDegrees ... > , CDim >& F , const _SparseOrDenseNodeData< Point< T , CDim > , UIntPack< CSigs ... > >& coefficients , DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const
{
typedef SparseNodeData< Point< T , CDim > , UIntPack< CSigs ... > > SparseType;
typedef DenseNodeData< Point< T , CDim > , UIntPack< CSigs ... > > DenseType;
static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim && sizeof...( CDegrees )==Dim && sizeof...( CSigs )==Dim , "[ERROR] Dimensions don't match" );
static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" );
static_assert( UIntPack< CDegrees ... >::template Compare< UIntPack< FEMSignature< CSigs >::Degree ... > >::Equal , "[ERROR] Constraint signature and degrees don't match" );
if ( typeid(coefficients)==typeid(SparseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , static_cast< const SparseType& >( coefficients ) , constraints() , maxDepth );
else if( typeid(coefficients)==typeid( DenseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , static_cast< const DenseType& >( coefficients ) , constraints() , maxDepth );
else return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , coefficients , constraints() , maxDepth );
}
// Add one-dimensions -> one-dimension constraints (with distinct signatures)
template< typename T , unsigned int ... FEMDegrees , unsigned int ... FEMSigs , unsigned int ... CDegrees , unsigned int ... CSigs >
void addFEMConstraints( typename BaseFEMIntegrator::template Constraint< UIntPack< FEMDegrees ... > , UIntPack< CDegrees ... > , 1 >& F , const _SparseOrDenseNodeData< T , UIntPack< CSigs ... > >& coefficients , DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const
{
typedef SparseNodeData< T , UIntPack< CSigs ... > > SparseType;
typedef DenseNodeData< T , UIntPack< CSigs ... > > DenseType;
static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim && sizeof...( CDegrees )==Dim && sizeof...( CSigs )==Dim , "[ERROR] Dimensions don't match" );
static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" );
static_assert( UIntPack< CDegrees ... >::template Compare< UIntPack< FEMSignature< CSigs >::Degree ... > >::Equal , "[ERROR] Constraint signature and degrees don't match" );
if ( typeid(coefficients)==typeid(SparseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , static_cast< const SparseType& >( coefficients ) , constraints() , maxDepth );
else if( typeid(coefficients)==typeid( DenseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , static_cast< const DenseType& >( coefficients ) , constraints() , maxDepth );
else return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< CSigs ... >() , F , coefficients , constraints() , maxDepth );
}
// Add one-dimensions -> one-dimension constraints (with the same signatures)
template< typename T , unsigned int ... FEMDegrees , unsigned int ... FEMSigs >
// void addFEMConstraints( typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... > >& F , const SparseNodeData< T , UIntPack< FEMSigs ... > >& coefficients , _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const
void addFEMConstraints( typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients , DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth ) const
{
typedef SparseNodeData< T , UIntPack< FEMSigs ... > > SparseType;
typedef DenseNodeData< T , UIntPack< FEMSigs ... > > DenseType;
static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" );
static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signatures and degrees don't match" );
typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F );
if ( typeid(coefficients)==typeid(SparseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients ) , constraints() , maxDepth );
else if( typeid(coefficients)==typeid( DenseType) ) return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients ) , constraints() , maxDepth );
else return _addFEMConstraints< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients , constraints() , maxDepth );
}
// Add interpolation constraints
template< typename T , unsigned int ... FEMSigs , unsigned int PointD , unsigned int ... PointDs >
typename std::enable_if< (sizeof...(PointDs)!=0) >::type addInterpolationConstraints( DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth , const InterpolationInfo< T , PointD >& iInfo , const InterpolationInfo< T , PointDs >& ... iInfos ) const
{
addInterpolationConstraints< T , FEMSigs ... >( constraints , maxDepth , iInfo );
addInterpolationConstraints< T , FEMSigs ... >( constraints , maxDepth , iInfos ... );
}
template< typename T , unsigned int ... FEMSigs , unsigned int PointD > void addInterpolationConstraints( DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxDepth , const InterpolationInfo< T , PointD >& interpolationInfo ) const;
// Real
template< unsigned int ... FEMDegrees1 , unsigned int ... FEMSigs1 , unsigned int ... FEMDegrees2 , unsigned int ... FEMSigs2 >
double dot( typename BaseFEMIntegrator::Constraint< UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , 1 >& F , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs1 ... > >& coefficients1 , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs2 ... > >& coefficients2 ) const
{
typedef SparseNodeData< Real , UIntPack< FEMSigs1 ... > > SparseType1;
typedef DenseNodeData< Real , UIntPack< FEMSigs1 ... > > DenseType1;
typedef SparseNodeData< Real , UIntPack< FEMSigs2 ... > > SparseType2;
typedef DenseNodeData< Real , UIntPack< FEMSigs2 ... > > DenseType2;
static_assert( sizeof...( FEMDegrees1 )==Dim && sizeof...( FEMSigs1 )==Dim && sizeof...( FEMDegrees2 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" );
static_assert( UIntPack< FEMDegrees1 ... >::template Compare< UIntPack< FEMSignature< FEMSigs1 >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" );
static_assert( UIntPack< FEMDegrees2 ... >::template Compare< UIntPack< FEMSignature< FEMSigs2 >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" );
if ( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else if( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else return _dot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , coefficients1 , coefficients2 , []( Real v , Real w ){ return v*w; } );
}
template< unsigned int ... FEMDegrees , unsigned int ... FEMSigs >
double dot( typename BaseFEMIntegrator::System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients1 , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients2 ) const
{
typedef SparseNodeData< Real , UIntPack< FEMSigs ... > > SparseType;
typedef DenseNodeData< Real , UIntPack< FEMSigs ... > > DenseType;
static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" );
static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signatures and degrees don't match" );
typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F );
if ( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else if( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , []( Real v , Real w ){ return v*w; } );
else return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients1 , coefficients2 , []( Real v , Real w ){ return v*w; } );
}
template< unsigned int ... FEMDegrees , unsigned int ... FEMSigs >
double squareNorm( typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients ) const
{
typedef SparseNodeData< Real , UIntPack< FEMSigs ... > > SparseType;
typedef DenseNodeData< Real , UIntPack< FEMSigs ... > > DenseType;
typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F );
if ( typeid(coefficients)==typeid(SparseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients ) , static_cast< const SparseType& >( coefficients ) , []( Real v , Real w ){ return v*w; } );
else if( typeid(coefficients)==typeid( DenseType) ) return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients ) , static_cast< const DenseType& >( coefficients ) , []( Real v , Real w ){ return v*w; } );
else return _dot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients , coefficients , []( Real v , Real w ){ return v*w; } );
}
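/* Note: squareNorm( F , c ) computes the same quantity as dot( F , c , c ),
   i.e. the energy c^T F c induced by the system F; it only spares the caller
   from repeating the coefficient argument. */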
template< unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , unsigned int ... PointDs >
double interpolationDot( const DenseNodeData< Real , UIntPack< FEMSigs1 ... > >& coefficients1 , const DenseNodeData< Real , UIntPack< FEMSigs2 ... > >& coefficients2 , const InterpolationInfo< Real , PointDs >* ... iInfos ) const
{
static_assert( sizeof...( FEMSigs1 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" );
return _interpolationDot< Real >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , []( Real v , Real w ){ return v*w; } , iInfos... );
}
template< unsigned int ... FEMSigs , unsigned int ... PointDs >
double interpolationSquareNorm( const DenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients , const InterpolationInfo< Real , PointDs >* ... iInfos ) const
{
static_assert( sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" );
return _interpolationDot< Real >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , coefficients , coefficients , []( Real v , Real w ){ return v*w; } , iInfos... );
}
// Generic
template< typename T , typename TDotT , unsigned int ... FEMDegrees1 , unsigned int ... FEMSigs1 , unsigned int ... FEMDegrees2 , unsigned int ... FEMSigs2 >
double dot( TDotT Dot , typename BaseFEMIntegrator::Constraint< UIntPack< FEMDegrees1 ... > , UIntPack< FEMDegrees2 ... > , 1 >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs1 ... > >& coefficients1 , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs2 ... > >& coefficients2 ) const
{
typedef SparseNodeData< T , UIntPack< FEMSigs1 ... > > SparseType1;
typedef DenseNodeData< T , UIntPack< FEMSigs1 ... > > DenseType1;
typedef SparseNodeData< T , UIntPack< FEMSigs2 ... > > SparseType2;
typedef DenseNodeData< T , UIntPack< FEMSigs2 ... > > DenseType2;
static_assert( sizeof...( FEMDegrees1 )==Dim && sizeof...( FEMSigs1 )==Dim && sizeof...( FEMDegrees2 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" );
static_assert( UIntPack< FEMDegrees1 ... >::template Compare< UIntPack< FEMSignature< FEMSigs1 >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" );
static_assert( UIntPack< FEMDegrees2 ... >::template Compare< UIntPack< FEMSignature< FEMSigs2 >::Degree ... > >::Equal , "[ERROR] FEM signature and degrees don't match" );
if ( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , Dot );
else if( typeid(coefficients1)==typeid(SparseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const SparseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , Dot );
else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid( DenseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const DenseType2& >( coefficients2 ) , Dot );
else if( typeid(coefficients1)==typeid( DenseType1) && typeid(coefficients2)==typeid(SparseType2) ) return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , static_cast< const DenseType1& >( coefficients1 ) , static_cast< const SparseType2& >( coefficients2 ) , Dot );
else return _dot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , F , coefficients1 , coefficients2 , Dot );
}
template< typename T , typename TDotT , unsigned int ... FEMDegrees , unsigned int ... FEMSigs >
double dot( TDotT Dot , typename BaseFEMIntegrator::System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients1 , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients2 ) const
{
typedef SparseNodeData< T , UIntPack< FEMSigs ... > > SparseType;
typedef DenseNodeData< T , UIntPack< FEMSigs ... > > DenseType;
static_assert( sizeof...( FEMDegrees )==Dim && sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" );
static_assert( UIntPack< FEMDegrees ... >::template Compare< UIntPack< FEMSignature< FEMSigs >::Degree ... > >::Equal , "[ERROR] FEM signatures and degrees don't match" );
typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F );
if ( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , Dot );
else if( typeid(coefficients1)==typeid(SparseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , Dot );
else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid( DenseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const DenseType& >( coefficients2 ) , Dot );
else if( typeid(coefficients1)==typeid( DenseType) && typeid(coefficients2)==typeid(SparseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients1 ) , static_cast< const SparseType& >( coefficients2 ) , Dot );
else return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients1 , coefficients2 , Dot );
}
template< typename T , typename TDotT , unsigned int ... FEMDegrees , unsigned int ... FEMSigs >
double squareNorm( TDotT Dot , typename BaseFEMIntegrator::template System< UIntPack< FEMDegrees ... > >& F , const _SparseOrDenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients ) const
{
typedef SparseNodeData< T , UIntPack< FEMSigs ... > > SparseType;
typedef DenseNodeData< T , UIntPack< FEMSigs ... > > DenseType;
typename BaseFEMIntegrator::template SystemConstraint< UIntPack< FEMDegrees ... > > _F( F );
if ( typeid(coefficients)==typeid(SparseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const SparseType& >( coefficients ) , static_cast< const SparseType& >( coefficients ) , Dot );
else if( typeid(coefficients)==typeid( DenseType) ) return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , static_cast< const DenseType& >( coefficients ) , static_cast< const DenseType& >( coefficients ) , Dot );
else return _dot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , _F , coefficients , coefficients , Dot );
}
template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , unsigned int ... PointDs >
double interpolationDot( TDotT Dot , const DenseNodeData< T , UIntPack< FEMSigs1 ... > >& coefficients1 , const DenseNodeData< T , UIntPack< FEMSigs2 ... > >& coefficients2 , const InterpolationInfo< T , PointDs >* ... iInfos ) const
{
static_assert( sizeof...( FEMSigs1 )==Dim && sizeof...( FEMSigs2 )==Dim , "[ERROR] Dimensions don't match" );
return _interpolationDot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , Dot , iInfos... );
}
template< typename T , typename TDotT , unsigned int ... FEMSigs , unsigned int ... PointDs >
double interpolationSquareNorm( TDotT Dot , const DenseNodeData< T , UIntPack< FEMSigs ... > >& coefficients , const InterpolationInfo< T , PointDs >* ... iInfos ) const
{
static_assert( sizeof...( FEMSigs )==Dim , "[ERROR] Dimensions don't match" );
return _interpolationDot< T >( UIntPack< FEMSigs ... >() , UIntPack< FEMSigs ... >() , coefficients , coefficients , Dot , iInfos... );
}
template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs >
SparseMatrix< Real > systemMatrix( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth depth , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs >
SparseMatrix< Real > prolongedSystemMatrix( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth highDepth , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
template< unsigned int ... FEMSigs >
SparseMatrix< Real > downSampleMatrix( UIntPack< FEMSigs ... > , LocalDepth highDepth ) const;
template< typename T , unsigned int ... PointDs , unsigned int ... FEMSigs >
SparseMatrix< Real > fullSystemMatrix( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , LocalDepth depth , bool nonRefinableOnly , const InterpolationInfo< T , PointDs >* ... interpolationInfo ) const;
struct SolverInfo
{
protected:
struct _IterFunction
{
_IterFunction( int i ) : _i0(i) , _type(0) {}
_IterFunction( std::function< int ( int ) > iFunction ) : _i1(iFunction) , _type(1) {}
_IterFunction( std::function< int ( bool , int ) > iFunction ) : _i2(iFunction) , _type(2) {}
_IterFunction( std::function< int ( int , bool , int ) > iFunction ) : _i3(iFunction) , _type(3) {}
_IterFunction& operator = ( int i ){ *this = _IterFunction(i) ; return *this; }
_IterFunction& operator = ( std::function< int ( int ) > iFunction ){ *this = _IterFunction(iFunction) ; return *this; }
_IterFunction& operator = ( std::function< int ( bool , int ) > iFunction ){ *this = _IterFunction(iFunction) ; return *this; }
_IterFunction& operator = ( std::function< int ( int , bool , int ) > iFunction ){ *this = _IterFunction(iFunction) ; return *this; }
int operator()( int vCycle , bool restriction , int depth ) const
{
switch( _type )
{
case 0: return _i0;
case 1: return _i1( depth );
case 2: return _i2( restriction , depth );
case 3: return _i3( vCycle , restriction , depth );
default: return 0;
}
}
protected:
int _i0;
std::function< int ( int ) > _i1;
std::function< int ( bool , int ) > _i2;
std::function< int ( int , bool , int ) > _i3;
int _type;
};
public:
// How to solve
bool wCycle;
LocalDepth cgDepth;
bool cascadic;
unsigned int sliceBlockSize;
bool useSupportWeights , useProlongationSupportWeights;
std::function< Real ( Real , Real ) > sorRestrictionFunction;
std::function< Real ( Real , Real ) > sorProlongationFunction;
_IterFunction iters;
int vCycles;
double cgAccuracy;
int baseDepth , baseVCycles;
// What to output
bool verbose , showResidual;
int showGlobalResidual;
SolverInfo( void ) : wCycle(false) , cgDepth(0) , cascadic(true) , sliceBlockSize(1) , useSupportWeights( false ) , useProlongationSupportWeights( false ) , sorRestrictionFunction( []( Real , Real ){ return (Real)1; } ) , sorProlongationFunction( []( Real , Real ){ return (Real)1; } ) , iters(1) , vCycles(1) , cgAccuracy(0.) , baseDepth(0) , baseVCycles(1) , verbose(false) , showResidual(false) , showGlobalResidual(SHOW_GLOBAL_RESIDUAL_NONE) { }
};
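/* Usage sketch (hedged): a typical SolverInfo setup before calling
   solveSystem below. The values and the names `Sigs`, `F`, `constraints`,
   and `maxDepth` are illustrative assumptions, not recommendations from
   this header:

     typename FEMTree< Dim , Real >::SolverInfo sInfo;
     sInfo.cgDepth = 0;           // conjugate-gradient only at the coarsest level
     sInfo.iters = 8;             // fixed relaxation count per level...
     sInfo.iters = std::function< int ( int ) >( []( int depth ){ return depth<4 ? 32 : 8; } ); // ...or depth-dependent
     sInfo.vCycles = 1;
     sInfo.cgAccuracy = 1e-3;
     sInfo.verbose = true;
     auto solution = tree.solveSystem( Sigs() , F , constraints , maxDepth , sInfo );
*/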
// Solve the linear system
template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs >
void solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , DenseNodeData< T , UIntPack< FEMSigs ... > >& solution , TDotT Dot , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< T , PointDs >* ... iData ) const;
template< unsigned int ... FEMSigs , typename T , typename TDotT , unsigned int ... PointDs >
DenseNodeData< T , UIntPack< FEMSigs ... > > solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< T , UIntPack< FEMSigs ... > >& constraints , TDotT Dot , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< T , PointDs >* ... iData ) const;
template< unsigned int ... FEMSigs , unsigned int ... PointDs >
void solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< Real , UIntPack< FEMSigs ... > >& constraints , DenseNodeData< Real , UIntPack< FEMSigs ... > >& solution , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< Real , PointDs >* ... iData ) const
{
return solveSystem< FEMSigs ... , Real >( UIntPack< FEMSigs ... >() , F , constraints , solution , []( Real v , Real w ){ return v*w; } , maxSolveDepth , solverInfo , iData ... );
}
template< unsigned int ... FEMSigs , unsigned int ... PointDs >
DenseNodeData< Real , UIntPack< FEMSigs ... > > solveSystem( UIntPack< FEMSigs ... > , typename BaseFEMIntegrator::template System< UIntPack< FEMSignature< FEMSigs >::Degree ... > >& F , const DenseNodeData< Real , UIntPack< FEMSigs ... > >& constraints , LocalDepth maxSolveDepth , const SolverInfo& solverInfo , InterpolationInfo< Real , PointDs >* ... iData ) const
{
return solveSystem( UIntPack< FEMSigs ... >() , F , constraints , []( Real v , Real w ){ return v*w; } , maxSolveDepth , solverInfo , iData ... );
}
FEMTreeNode& spaceRoot( void ){ return *_spaceRoot; }
const FEMTreeNode& tree( void ) const { return *_tree; }
std::function< void ( FEMTreeNode& ) > initializer( void ){ return _NodeInitializer( *this ); }
size_t leaves( void ) const { return _tree->leaves(); }
size_t nodes( void ) const { size_t count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( IsActiveNode< Dim >( n ) ) count++ ; return count; }
size_t ghostNodes( void ) const { size_t count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( !IsActiveNode< Dim >( n ) ) count++ ; return count; }
inline size_t validSpaceNodes( void ) const { size_t count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( isValidSpaceNode( n ) ) count++ ; return count; }
inline size_t validSpaceNodes( LocalDepth d ) const { size_t count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( _localDepth(n)==d && isValidSpaceNode( n ) ) count++ ; return count; }
template< unsigned int ... FEMSigs > size_t validFEMNodes( UIntPack< FEMSigs ... > ) const { size_t count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( isValidFEMNode( UIntPack< FEMSigs ... >() , n ) ) count++ ; return count; }
template< unsigned int ... FEMSigs > size_t validFEMNodes( UIntPack< FEMSigs ... > , LocalDepth d ) const { size_t count = 0 ; for( const FEMTreeNode* n=_tree->nextNode() ; n ; n=_tree->nextNode( n ) ) if( _localDepth(n)==d && isValidFEMNode( UIntPack< FEMSigs ... >() , n ) ) count++ ; return count; }
LocalDepth depth( void ) const { return _spaceRoot->maxDepth(); }
void resetNodeIndices( void ){ _nodeCount = 0 ; for( FEMTreeNode* node=_tree->nextNode() ; node ; node=_tree->nextNode( node ) ) _nodeInitializer( *node ) , node->nodeData.flags=0; }
std::vector< int > merge( FEMTree* tree );
protected:
template< class Real1 , unsigned int _Dim > static bool _IsZero( Point< Real1 , _Dim > p );
template< class Real1 > static bool _IsZero( Real1 p );
template< class SReal , class Data , unsigned int _Dim > static Data _StencilDot( Point< SReal , _Dim > p1 , Point< Data , _Dim > p2 );
template< class SReal , class Data > static Data _StencilDot( Point< SReal , 1 > p1 , Point< Data , 1 > p2 );
template< class SReal , class Data > static Data _StencilDot( SReal p1 , Point< Data , 1 > p2 );
template< class SReal , class Data > static Data _StencilDot( Point< SReal , 1 > p1 , Data p2 );
template< class SReal , class Data > static Data _StencilDot( SReal p1 , Data p2 );
// We need the signatures to test if nodes are valid
template< typename T , unsigned int ... FEMSigs , unsigned int ... CSigs , unsigned int ... FEMDegrees , unsigned int ... CDegrees , unsigned int CDim , class Coefficients >
void _addFEMConstraints( UIntPack< FEMSigs ... > , UIntPack< CSigs ... > , typename BaseFEMIntegrator::Constraint< UIntPack< FEMDegrees ... > , UIntPack< CDegrees ... > , CDim >& F , const Coefficients& coefficients , Pointer( T ) constraints , LocalDepth maxDepth ) const;
template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , unsigned int ... Degrees1 , unsigned int ... Degrees2 , class Coefficients1 , class Coefficients2 >
double _dot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... > , typename BaseFEMIntegrator::Constraint< UIntPack< Degrees1 ... > , UIntPack< Degrees2 ... > , 1 >& F , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot ) const;
template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , class Coefficients1 , class Coefficients2 , unsigned int PointD >
double _interpolationDot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... > , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot , const InterpolationInfo< T , PointD >* iInfo ) const;
template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , class Coefficients1 , class Coefficients2 , unsigned int PointD , unsigned int ... PointDs >
double _interpolationDot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... > , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot , const InterpolationInfo< T , PointD >* iInfo , const InterpolationInfo< T , PointDs >* ... iInfos ) const
{
return _interpolationDot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , Dot , iInfo ) + _interpolationDot< T >( UIntPack< FEMSigs1 ... >() , UIntPack< FEMSigs2 ... >() , coefficients1 , coefficients2 , Dot , iInfos... );
}
template< typename T , typename TDotT , unsigned int ... FEMSigs1 , unsigned int ... FEMSigs2 , class Coefficients1 , class Coefficients2 > double _interpolationDot( UIntPack< FEMSigs1 ... > , UIntPack< FEMSigs2 ... > , const Coefficients1& coefficients1 , const Coefficients2& coefficients2 , TDotT Dot ) const{ return 0; }
};
template< unsigned int Dim , class Real > double FEMTree< Dim , Real >::_MaxMemoryUsage = 0;
template< unsigned int Dim , class Real > double FEMTree< Dim , Real >::_LocalMemoryUsage = 0;
template< unsigned int Dim , class Real , class Vertex >
struct IsoSurfaceExtractor
{
struct IsoStats{};
template< typename Data , unsigned int ... FEMSigs , unsigned int WeightDegree , unsigned int DataSig >
static IsoStats Extract
(
UIntPack< FEMSigs ... > , UIntPack< WeightDegree > , UIntPack< DataSig > , // Dummy variables for grouping the parameter
const FEMTree< Dim , Real >& tree , // The tree over which the system is discretized
const typename FEMTree< Dim , Real >::template DensityEstimator< WeightDegree >* densityWeights , // Density weights
const SparseNodeData< ProjectiveData< Data , Real > , IsotropicUIntPack< Dim , DataSig > >* data , // Auxiliary spatial data
const DenseNodeData< Real , UIntPack< FEMSigs ... > >& coefficients , // The coefficients of the function
Real isoValue , // The value at which to extract the level-set
CoredMeshData< Vertex >& mesh , // The mesh in which to store the output
std::function< void ( Vertex& , Point< Real , Dim > , Real , Data ) > SetVertex , // A function for setting the depth and data of a vertex
bool nonLinearFit , // Should a non-linear interpolant be used
bool addBarycenter , // Should we triangulate polygons by adding a mid-point
bool polygonMesh , // Should we output triangles or polygons
bool flipOrientation // Should we flip the orientation
)
{
// The unspecialized implementation is not supported
WARN( "Iso-surface extraction not supported for dimension %d" , Dim );
return IsoStats();
}
};
template< unsigned int Dim , class Real >
struct FEMTreeInitializer
{
typedef RegularTreeNode< Dim , FEMTreeNodeData > FEMTreeNode;
typedef NodeAndPointSample< Dim , Real > PointSample;
template< class Data >
struct DerivativeStream
{
virtual void resolution( unsigned int res[] ) const = 0;
virtual bool nextDerivative( unsigned int idx[] , unsigned int& dir , Data& dValue ) = 0;
};
// Initialize the tree using a refinement avatar
static int Initialize( FEMTreeNode& root , int maxDepth , std::function< bool ( int , int[] ) > Refine , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
// Initialize the tree using a point stream
static int Initialize( FEMTreeNode& root , InputPointStream< Real , Dim >& pointStream , int maxDepth , std::vector< PointSample >& samplePoints , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
template< class Data > static int Initialize( FEMTreeNode& root , InputPointStreamWithData< Real , Dim , Data >& pointStream , int maxDepth , std::vector< PointSample >& samplePoints , std::vector< Data >& sampleData , bool mergeNodeSamples , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer , std::function< Real ( const Point< Real , Dim >& , Data& ) > ProcessData = []( const Point< Real , Dim >& , Data& ){ return (Real)1.; } );
// modified by dojo
template< class Data > static int Initialize( FEMTreeNode& root , InputPointStreamWithData< Real , Dim , Data >& pointStream , int maxDepth , std::vector< PointSample >& samplePoints , std::vector< Data >& sampleData , bool mergeNodeSamples , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer , std::function< Real ( const Point< Real , Dim >& , const int , bool& , Data& ) > ProcessData = []( const Point< Real , Dim >& , const int , bool& , Data& ){ return (Real)1.; } );
// Initialize the tree using simplices
static void Initialize( FEMTreeNode& root , const std::vector< Point< Real , Dim > >& vertices , const std::vector< SimplexIndex< Dim-1 > >& simplices , int maxDepth , std::vector< PointSample >& samples , bool mergeNodeSamples , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
static void Initialize( FEMTreeNode& root , const std::vector< Point< Real , Dim > >& vertices , const std::vector< SimplexIndex< Dim-1 > >& simplices , int maxDepth , std::vector< NodeSimplices< Dim , Real > >& nodeSimplices , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
template< class Data , class _Data , bool Dual=true >
static int Initialize( FEMTreeNode& root , ConstPointer( Data ) values , ConstPointer( int ) labels , int resolution[Dim] , std::vector< NodeSample< Dim , _Data > > derivatives[Dim] , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer , std::function< _Data ( const Data& ) > DataConverter = []( const Data& d ){ return (_Data)d; } );
template< bool Dual , class Data >
static unsigned int Initialize( FEMTreeNode& root , DerivativeStream< Data >& dStream , std::vector< NodeSample< Dim , Data > > derivatives[Dim] , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
protected:
static int _AddSimplex( FEMTreeNode& root , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< PointSample >& samples , std::vector< int >* nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
static int _AddSimplex( FEMTreeNode& root , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< NodeSimplices< Dim , Real > >& simplices , std::vector< int >& nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
static int _AddSimplex( FEMTreeNode* node , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< PointSample >& samples , std::vector< int >* nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
static int _AddSimplex( FEMTreeNode* node , Simplex< Real , Dim , Dim-1 >& s , int maxDepth , std::vector< NodeSimplices< Dim , Real > >& simplices , std::vector< int >& nodeToIndexMap , Allocator< FEMTreeNode >* nodeAllocator , std::function< void ( FEMTreeNode& ) > NodeInitializer );
};
template< unsigned int Dim , class Real >
template< unsigned int ... SupportSizes >
const unsigned int FEMTree< Dim , Real >::CornerLoopData< SupportSizes ... >::supportSizes[] = { SupportSizes ... };
#include "FEMTree.inl"
#include "FEMTree.SortedTreeNodes.inl"
#include "FEMTree.WeightedSamples.inl"
#include "FEMTree.System.inl"
#include "FEMTree.Evaluation.inl"
#include "FEMTree.IsoSurface.specialized.inl"
#include "FEMTree.Initialize.inl"
#endif // FEM_TREE_INCLUDED
|
SpatialConvolutionMM.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialConvolutionMM.c"
#else
#include <ATen/div_rtn.h>
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
THTensor *input, THTensor *gradOutput,
THTensor *weight, THTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW, int weight_nullable) {
THArgCheck(kW > 0 && kH > 0, 9,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
if (weight != NULL) {
THNN_ARGCHECK(!weight->is_empty() && (weight->dim() == 2 || weight->dim() == 4), 5, weight,
"non-empty 2D or 4D weight tensor expected, but got: %s");
if (bias != NULL) {
THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0));
}
} else if (!weight_nullable) {
THError("weight tensor is expected to be non-nullable");
}
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t exactInputHeight = inputHeight + 2 * padH;
int64_t exactInputWidth = inputWidth + 2 * padW;
if (exactInputHeight < kH || exactInputWidth < kW) {
THError("Calculated padded input size per channel: (%ld x %ld). "
"Kernel size: (%d x %d). Kernel size can't be greater than actual input size",
exactInputHeight, exactInputWidth, kH, kW);
}
int64_t outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1;
int64_t outputWidth = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1;
if (outputWidth < 1 || outputHeight < 1) {
THError("Given input size per channel: (%ld x %ld). "
"Calculated output size per channel: (%ld x %ld). Output size is too small",
inputHeight, inputWidth, outputHeight, outputWidth);
}
if (weight != NULL) {
int64_t nInputPlane = weight->size(1);
if (weight->dim() == 2) {
nInputPlane /= (kH * kW);
}
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
}
if (gradOutput != NULL) {
if (weight != NULL) {
int64_t nOutputPlane = weight->size(0);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
} else if (bias != NULL) {
int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
}
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
}
}
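/* The output size computed above follows the usual convolution arithmetic:
 *
 *   outputHeight = floor((inputHeight + 2*padH - kH) / dH) + 1
 *
 * For example, a 32x32 input with kH = kW = 3, padH = padW = 1 and
 * dH = dW = 2 gives floor((32 + 2 - 3) / 2) + 1 = 15 + 1 = 16, i.e. a
 * 16x16 output per channel.
 */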
static THTensor* THNN_(newViewWeightMM2d)(THTensor *weight) {
weight = THTensor_(newContiguous)(weight);
if (weight->dim() == 4) {
int64_t s1 = weight->size(0);
int64_t s2 = weight->size(1) * weight->size(2) * weight->size(3);
THTensor *old_weight = weight;
weight = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(weight), weight->storage_offset(),
s1, -1, s2, -1);
c10::raw::intrusive_ptr::decref(old_weight);
}
return weight;
}
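/* newViewWeightMM2d flattens a 4D weight of shape
 * (nOutputPlane, nInputPlane, kH, kW) into a 2D view of shape
 * (nOutputPlane, nInputPlane*kH*kW) without copying, so the convolution can
 * be written as one matrix multiply against the unfolded input below.
 * For example, a (64, 3, 3, 3) weight becomes a 64 x 27 matrix.
 */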
static void THNN_(SpatialConvolutionMM_updateOutput_frame)(
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int64_t nInputPlane,
int64_t inputWidth,
int64_t inputHeight,
int64_t nOutputPlane,
int64_t outputWidth,
int64_t outputHeight)
{
int64_t i;
THTensor *output2d;
THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
output2d = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(output), output->storage_offset(),
nOutputPlane, -1,
outputHeight*outputWidth, -1);
if (bias) {
for(i = 0; i < nOutputPlane; i++)
THVector_(fill)
(THStorage_(data)(THTensor_getStoragePtr(output)) + output->storage_offset() + output->stride(0) * i,
THTensor_(get1d)(bias, i), outputHeight*outputWidth);
} else {
THTensor_(zero)(output);
}
THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);
c10::raw::intrusive_ptr::decref(output2d);
}
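/* The frame computation above is the classic im2col + GEMM formulation:
 * unfolded_copy lays the input patches out as finput with shape
 * (nInputPlane*kH*kW) x (outputHeight*outputWidth), the bias (if any) is
 * broadcast into output, and the addmm computes
 *
 *   output2d = output2d + weight * finput
 *
 * with weight viewed as nOutputPlane x (nInputPlane*kH*kW), yielding an
 * output2d of shape nOutputPlane x (outputHeight*outputWidth).
 */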
void THNN_(SpatialConvolutionMM_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH)
{
weight = THNN_(newViewWeightMM2d)(weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, 0);
input = THTensor_(newContiguous)(input);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
int64_t nInputPlane = input->size(dimf);
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t nOutputPlane = weight->size(0);
int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
if(input->dim() == 3)
{
THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
THNN_(SpatialConvolutionMM_updateOutput_frame)
(input, output, weight, bias, finput,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
else
{
int64_t T = input->size(0);
int64_t t;
THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *input_t = THTensor_(newSelect)(input, 0, t);
THTensor *output_t = THTensor_(newSelect)(output, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionMM_updateOutput_frame)
(input_t, output_t, weight, bias, finput_t,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
c10::raw::intrusive_ptr::decref(input_t);
c10::raw::intrusive_ptr::decref(output_t);
c10::raw::intrusive_ptr::decref(finput_t);
}
}
c10::raw::intrusive_ptr::decref(input);
c10::raw::intrusive_ptr::decref(weight);
}
static void THNN_(SpatialConvolutionMM_updateGradInput_frame)(
THTensor *gradInput,
THTensor *gradOutput,
THTensor *weight,
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH)
{
THTensor *gradOutput2d = THTensor_(newWithStorage2d)
(THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
gradOutput->size(0), -1,
gradOutput->size(1)*gradOutput->size(2), -1);
THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
c10::raw::intrusive_ptr::decref(gradOutput2d);
THTensor_(zero)(gradInput);
THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH,
padW, padH,
gradInput->size(0), gradInput->size(2), gradInput->size(1),
gradOutput->size(2), gradOutput->size(1));
}
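/* The input gradient mirrors the forward GEMM: with the transposed weight
 * of shape (nInputPlane*kH*kW) x nOutputPlane, the addmm computes
 *
 *   fgradInput = weight^T * gradOutput2d
 *
 * and unfolded_acc (the col2im counterpart of unfolded_copy) scatters the
 * patch gradients back into gradInput, accumulating where patches overlap.
 */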
void THNN_(SpatialConvolutionMM_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *weight,
THTensor *finput,
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH)
{
weight = THNN_(newViewWeightMM2d)(weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, 0);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
THTensor_(resizeAs)(gradInput, input);
THTensor_(resizeAs)(fgradInput, finput);
// depending on the BLAS library, fgradInput (the result tensor) might be
// left uninitialized when alpha is zero, which might lead to weird behavior;
// hence, to be safe, zero it
THTensor_(zero)(fgradInput);
THTensor *tweight = THTensor_(new)();
THTensor_(transpose)(tweight, weight, 0, 1);
if(input->dim() == 3)
{
THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput,
tweight, fgradInput,
kW, kH, dW, dH, padW, padH);
}
else
{
int64_t T = input->size(0);
int64_t t;
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t,
tweight, fgradInput_t,
kW, kH, dW, dH, padW, padH);
c10::raw::intrusive_ptr::decref(gradInput_t);
c10::raw::intrusive_ptr::decref(gradOutput_t);
c10::raw::intrusive_ptr::decref(fgradInput_t);
}
}
c10::raw::intrusive_ptr::decref(tweight);
c10::raw::intrusive_ptr::decref(input);
c10::raw::intrusive_ptr::decref(gradOutput);
c10::raw::intrusive_ptr::decref(weight);
}
static void THNN_(SpatialConvolutionMM_accGradParameters_frame)(
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
scalar_t scale)
{
int64_t i;
THTensor *gradOutput2d = THTensor_(newWithStorage2d)
(THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
gradOutput->size(0), -1,
gradOutput->size(1)*gradOutput->size(2), -1);
if (gradWeight) {
THTensor *tfinput = THTensor_(new)();
THTensor_(transpose)(tfinput, finput, 0, 1);
THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
c10::raw::intrusive_ptr::decref(tfinput);
}
if (gradBias) {
for(i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++)
{
int64_t k;
scalar_t sum = 0;
scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
for(k = 0; k < gradOutput2d->size(1); k++)
sum += data[k];
(THStorage_(data)(THTensor_getStoragePtr(gradBias)) + gradBias->storage_offset())[i] += scale*sum;
}
}
c10::raw::intrusive_ptr::decref(gradOutput2d);
}
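/* The parameter gradients also reduce to a GEMM and row sums:
 *
 *   gradWeight += scale * gradOutput2d * finput^T
 *
 * and each gradBias[i] accumulates scale times the sum of row i of
 * gradOutput2d (one row per output plane).
 */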
void THNN_(SpatialConvolutionMM_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput, // can be NULL if gradWeight = NULL
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
accreal scale_)
{
scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
if (gradWeight) {
THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
gradWeight = THNN_(newViewWeightMM2d)(gradWeight);
}
if (gradBias) {
THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
}
THNN_(SpatialConvolutionMM_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, 1);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
if(input->dim() == 3)
{
THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight,
gradBias, finput, scale);
}
else
{
int64_t T = input->size(0);
int64_t t;
for(t = 0; t < T; t++)
{
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *finput_t = NULL;
if (gradWeight) {
finput_t = THTensor_(newSelect)(finput, 0, t);
}
THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight,
gradBias, finput_t, scale);
c10::raw::intrusive_ptr::decref(gradOutput_t);
if (gradWeight) {
c10::raw::intrusive_ptr::decref(finput_t);
}
}
}
c10::raw::intrusive_ptr::decref(input);
c10::raw::intrusive_ptr::decref(gradOutput);
if (gradWeight) {
c10::raw::intrusive_ptr::decref(gradWeight);
}
}
#endif
|
resize-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file resize-inl.h
* \brief image resize operator using OpenCV; the GPU version only supports bilinear resize
* \author Jake Lee
*/
#ifndef MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#define MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#include <mxnet/base.h>
#include <vector>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "image_utils.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
template<typename DType, typename T, typename Acctype>
void ResizeImplCUDA(Stream<gpu> *s,
const T input,
const T output);
#endif // MXNET_USE_CUDA
struct ResizeParam : public dmlc::Parameter<ResizeParam> {
mxnet::Tuple<int> size;
bool keep_ratio;
int interp;
DMLC_DECLARE_PARAMETER(ResizeParam) {
DMLC_DECLARE_FIELD(size)
.set_default(mxnet::Tuple<int>())
.describe("Size of new image. Could be (width, height) or (size)");
DMLC_DECLARE_FIELD(keep_ratio)
.describe("Whether to resize the short edge or both edges to `size`, "
"if size is give as an integer.")
.set_default(false);
DMLC_DECLARE_FIELD(interp)
.set_default(1)
.describe("Interpolation method for resizing. By default uses bilinear interpolation. "
"Options are INTER_NEAREST - a nearest-neighbor interpolation, "
"INTER_LINEAR - a bilinear interpolation, "
"INTER_AREA - resampling using pixel area relation, "
"INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood, "
"INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood. "
"Note that the GPU version only supports bilinear interpolation(1).");
}
};
// handle the keep ratio param
inline SizeParam GetHeightAndWidth(int data_h,
int data_w,
const ResizeParam& param) {
CHECK((param.size.ndim() == 1) || (param.size.ndim() == 2))
<< "Input size dimension must be 1 or 2, but got "
<< param.size.ndim();
int resized_h;
int resized_w;
if (param.size.ndim() == 1) {
CHECK_GT(param.size[0], 0)
<< "Input size should be greater than 0, but got "
<< param.size[0];
if (!param.keep_ratio) {
resized_h = param.size[0];
resized_w = param.size[0];
} else {
if (data_h > data_w) {
resized_w = param.size[0];
resized_h = static_cast<int>(data_h * resized_w / data_w);
} else {
resized_h = param.size[0];
resized_w = static_cast<int>(data_w * resized_h / data_h);
}
}
} else {
CHECK_GT(param.size[0], 0)
<< "Input width should be greater than 0, but got "
<< param.size[0];
CHECK_GT(param.size[1], 0)
<< "Input height should be greater than 0, but got "
<< param.size[1];
resized_h = param.size[1];
resized_w = param.size[0];
}
return SizeParam(resized_h, resized_w);
}
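// Worked example (illustrative, not from the original): with keep_ratio=true,
// size=(300,) and a 600x400 (h x w) input, the shorter edge w maps to 300 and
// h scales proportionally to 600*300/400 = 450, giving a 450x300 output.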
inline bool ResizeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
// input attrs should only be (h, w, c) or (n, h, w, c)
CHECK((in_attrs->at(0).ndim() == 3U) || (in_attrs->at(0).ndim() == 4U))
<< "Input image dimension should be 3 or 4 but got "
<< in_attrs->at(0).ndim();
const auto& ishape = (*in_attrs)[0];
const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed);
SizeParam size;
if (ishape.ndim() == 3) {
size = GetHeightAndWidth(ishape[H], ishape[W], param);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({size.height, size.width, ishape[C]}));
} else {
size = GetHeightAndWidth(ishape[kH], ishape[kW], param);
SHAPE_ASSIGN_CHECK(*out_attrs, 0,
mxnet::TShape({ishape[N], size.height, size.width, ishape[kC]}));
}
return true;
}
inline void ResizeImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const int height,
const int width,
const int interp,
const int input_index = 0,
const int output_index = 0) {
#if MXNET_USE_OPENCV
CHECK_NE(inputs[0].type_flag_, mshadow::kFloat16) << "opencv image mat doesn't support fp16";
CHECK((inputs[0].type_flag_ != mshadow::kInt32) && (inputs[0].type_flag_ != mshadow::kInt64))
<< "opencv resize doesn't support int32, int64";
// map the mshadow type flag to an OpenCV element depth (combined with the channel count via CV_MAKETYPE below)
const int DTYPE[] = {CV_32F, CV_64F, -1, CV_8U, CV_32S};
if (inputs[0].ndim() == 3) {
const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[C]);
cv::Mat buf(inputs[0].shape_[H], inputs[0].shape_[W], cv_type, inputs[0].dptr_);
cv::Mat dst(outputs[0].shape_[H], outputs[0].shape_[W], cv_type, outputs[0].dptr_);
cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
CHECK(!dst.empty());
CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr_);
} else {
const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[kC]);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
cv::Mat buf(inputs[0].shape_[kH], inputs[0].shape_[kW], cv_type,
inputs[0].dptr<DType>() + input_index);
cv::Mat dst(outputs[0].shape_[kH], outputs[0].shape_[kW], cv_type,
outputs[0].dptr<DType>() + output_index);
cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
CHECK(!dst.empty());
CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr<DType>() + output_index);
});
}
#else
LOG(FATAL) << "Build with USE_OPENCV=1 for image resize operator.";
#endif // MXNET_USE_OPENCV
}
template <typename xpu>
inline void Resize(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(outputs.size(), 1U);
const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed);
SizeParam size;
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
CHECK(param.interp == 1) << "interp should be 1 for using Resize on GPU.";
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
if (inputs[0].ndim() == 3) {
Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
Tensor<gpu, 3, DType> output = outputs[0].get<gpu, 3, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 3, DType>, float>(s, input, output);
} else {
Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
Tensor<gpu, 4, DType> output = outputs[0].get<gpu, 4, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 4, DType>, float>(s, input, output);
}
});
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
size = GetHeightAndWidth(inputs[0].shape_[H], inputs[0].shape_[W], param);
ResizeImpl(inputs, outputs, size.height, size.width, param.interp);
} else {
size = GetHeightAndWidth(inputs[0].shape_[kH], inputs[0].shape_[kW], param);
const auto batch_size = inputs[0].shape_[N];
const auto input_step = inputs[0].shape_[kH] * inputs[0].shape_[kW] * inputs[0].shape_[kC];
const auto output_step = size.height * size.width * inputs[0].shape_[kC];
#pragma omp parallel for
for (auto i = 0; i < batch_size; ++i) {
ResizeImpl(inputs, outputs, size.height, size.width,
param.interp, i * input_step, i * output_step);
}
}
}
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
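// --- Illustrative sketch (not part of resize-inl.h) ----------------------
// A minimal, self-contained bilinear resize for one single-channel float
// image, showing the interpolation this operator delegates to OpenCV's
// INTER_LINEAR (half-pixel-center convention). All names below are local
// to this sketch, not MXNet or OpenCV API.
#include <algorithm>
#include <cstdio>
#include <vector>

static void bilinear_resize(const std::vector<float>& src, int sh, int sw,
                            std::vector<float>* dst, int dh, int dw) {
  const float sy = static_cast<float>(sh) / dh;  // vertical scale factor
  const float sx = static_cast<float>(sw) / dw;  // horizontal scale factor
  for (int y = 0; y < dh; ++y) {
    const float fy = std::max(0.0f, (y + 0.5f) * sy - 0.5f);
    const int y0 = std::min(sh - 1, static_cast<int>(fy));
    const int y1 = std::min(sh - 1, y0 + 1);
    const float wy = fy - y0;
    for (int x = 0; x < dw; ++x) {
      const float fx = std::max(0.0f, (x + 0.5f) * sx - 0.5f);
      const int x0 = std::min(sw - 1, static_cast<int>(fx));
      const int x1 = std::min(sw - 1, x0 + 1);
      const float wx = fx - x0;
      const float top = src[y0*sw + x0]*(1-wx) + src[y0*sw + x1]*wx;
      const float bot = src[y1*sw + x0]*(1-wx) + src[y1*sw + x1]*wx;
      (*dst)[y*dw + x] = top*(1-wy) + bot*wy;
    }
  }
}

int main() {
  std::vector<float> src = {0.f, 1.f, 2.f, 3.f};  // a 2x2 ramp image
  std::vector<float> dst(4 * 4);
  bilinear_resize(src, 2, 2, &dst, 4, 4);
  std::printf("%.2f %.2f\n", dst[0], dst[15]);    // expect 0.00 and 3.00
  return 0;
}
// --------------------------------------------------------------------------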
|
Parallel Programming in C - Linear Search.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"
/*
OpenMP implementation example
Details of implementation/tutorial can be found here: http://madhugnadig.com/articles/parallel-processing/2017/02/25/parallel-computing-in-c-using-openMP.html
*/
clock_t t;
double cpu_time_used;
int linearSearch(int* A, int n, int tos);
int main(){
int number, iter =0, find;
int* Arr;
scanf("%d", &number);
// Allocate only after 'number' has been read; the original called malloc
// with an uninitialized 'number', which is undefined behavior.
Arr = (int *)malloc( number * sizeof(int));
for(; iter<number; iter++){
scanf("%d", &Arr[iter]);
}
scanf("%d", &find);
printf("\nTo find: %d\n", find);
t = clock();
int indexx = linearSearch(Arr, number, find);
t = clock()-t;
if(indexx == -1){
printf("Not found");
}
else
printf("Found at %d\n", indexx);
cpu_time_used = ((double)t)/CLOCKS_PER_SEC;
printf("\nTime taken for search: %f", cpu_time_used);
return 0;
}
// Linear search begins here
int linearSearch(int* A, int n, int tos){
int foundat = -1;
//Simple OpenMP for loop in parallel
#pragma omp parallel for
for(int iter =0; iter< n; iter++){
if(A[iter] == tos)
// Do not return from inside the loop: branching out of an OpenMP for
// region is invalid. Note the unsynchronized write to 'foundat' is a
// data race; any matching (1-based) position may win.
foundat = iter+1;
}
// Return the (1-based) position only after every element has been checked.
return foundat;
}
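/* Illustrative alternative (not in the original tutorial): a race-free
   variant that keeps the smallest matching index using an OpenMP 3.1
   min-reduction. The name linearSearchMin is local to this sketch. */
int linearSearchMin(int* A, int n, int tos){
    int foundat = n; /* sentinel meaning 'not found' */
    #pragma omp parallel for reduction(min:foundat)
    for(int iter = 0; iter < n; iter++){
        if(A[iter] == tos && iter < foundat)
            foundat = iter;
    }
    return foundat == n ? -1 : foundat + 1; /* keep the tutorial's 1-based result */
}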
|
truecrypt_fmt_plug.c | /*
* TrueCrypt volume support for John The Ripper
*
* Written by Alain Espinosa <alainesp at gmail.com> in 2012. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2012 Alain Espinosa and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* (This is a heavily cut-down "BSD license".)
*
 * Updated in Dec 2014 by JimF. This is an ugly format, and it was converted
 * into a more standard (crypt_all-based) format. The PKCS5_PBKDF2_HMAC can
 * be replaced with faster pbkdf2_xxxx functions (possibly with SIMD usage);
 * this has been done for sha512, and ripemd160 and Whirlpool pbkdf2 header
 * files have been created. Also, a proper decrypt is now done (in cmp_exact)
 * and we test against the 'TRUE' signature and against 2 crc32's which
 * are computed over the 448 bytes of decrypted data, so we now have a
 * full 96 bits of hash. There is no way we get false positives from
 * this slow format. AES_XTS removed. Also, we now only run pbkdf2 over
 * 64 bytes of data (all that is needed for the 2 AES keys), and that sped
 * up the crypts A LOT (~3x faster).
 */
#include <string.h>
#include "arch.h"
#if FMT_EXTERNS_H
extern struct fmt_main fmt_truecrypt;
extern struct fmt_main fmt_truecrypt_ripemd160;
extern struct fmt_main fmt_truecrypt_ripemd160boot;
extern struct fmt_main fmt_truecrypt_sha512;
extern struct fmt_main fmt_truecrypt_whirlpool;
#elif FMT_REGISTERS_H
john_register_one(&fmt_truecrypt);
john_register_one(&fmt_truecrypt_ripemd160);
john_register_one(&fmt_truecrypt_ripemd160boot);
john_register_one(&fmt_truecrypt_sha512);
john_register_one(&fmt_truecrypt_whirlpool);
#else
#include "xts.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"
#include "johnswap.h"
#include "loader.h"
#include "pbkdf2_hmac_sha512.h"
#include "pbkdf2_hmac_ripemd160.h"
#include "pbkdf2_hmac_whirlpool.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 4
#else
#define OMP_SCALE 1
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
/* 64 is the actual maximum used by Truecrypt software as of version 7.1a */
#define PLAINTEXT_LENGTH 64
#define MAX_CIPHERTEXT_LENGTH (512*2+32)
#define SALT_SIZE sizeof(struct cust_salt)
#define SALT_ALIGN 4
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static unsigned char (*key_buffer)[PLAINTEXT_LENGTH + 1];
static unsigned char (*first_block_dec)[16];
#define TAG_WHIRLPOOL "truecrypt_WHIRLPOOL$"
#define TAG_SHA512 "truecrypt_SHA_512$"
#define TAG_RIPEMD160 "truecrypt_RIPEMD_160$"
#define TAG_RIPEMD160BOOT "truecrypt_RIPEMD_160_BOOT$"
#define TAG_WHIRLPOOL_LEN (sizeof(TAG_WHIRLPOOL)-1)
#define TAG_SHA512_LEN (sizeof(TAG_SHA512)-1)
#define TAG_RIPEMD160_LEN (sizeof(TAG_RIPEMD160)-1)
#define TAG_RIPEMD160BOOT_LEN (sizeof(TAG_RIPEMD160BOOT)-1)
#define IS_SHA512 1
#define IS_RIPEMD160 2
#define IS_WHIRLPOOL 3
#define IS_RIPEMD160BOOT 4
// borrowed from https://github.com/bwalex/tc-play
#define MAX_PASSSZ 64
#define PASS_BUFSZ 256
#define KPOOL_SZ 64
#define MAX_KFILE_SZ 1048576 /* 1 MB */
#define MAX_KEYFILES 256
// keyfile(s) data
unsigned char (*keyfiles_data)[MAX_KFILE_SZ];
int (*keyfiles_length);
static int *cracked;
struct cust_salt {
unsigned char salt[64];
// I 'thought' that bin[] could be removed, so that only salt[] was used
// for salt dupe-removal. That was wrong: bin[] must also be part of the
// salt dupe logic, or we will get wrong passwords reported as found if
// there are hashes with the same salts. The bin[] array really is part of
// the salt, since we decrypt it to do the final check, so there is no real
// way to have any duplicate salts. In essence, we have a 'fixed' binary
// and the salt is the entire input hash. The fixed binary can be
// thought of as 'TRUE' (but it is more than this); we simply
// do not know the real binary until after we correctly decrypt.
// Initially I moved bin[] and ported to dyna_salt. All hashes in a
// test suite cracked, BUT the same password was reported for all of them,
// the first password in the file. Not what we wanted.
unsigned char bin[512-64];
int loop_inc;
int num_iterations;
int hash_type;
int nkeyfiles;
} *psalt;
static struct fmt_tests tests_ripemd160[] = {
{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" },
{"truecrypt_RIPEMD_160$6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" },
{"truecrypt_RIPEMD_160$76707c2caebf50bdc10a0f010276302299fcd07358130a8c7ddc86bd31b816b957c69f8aae7422a3e1b8dff4853d9090a818aa25801b96ac584cb37190d7d5e376a62fd08629e6416b7ee24150f8e7b963a8e7f5e9b0b71c55e84977cf03c0215c48538229e6449ba6f7dddc0db1498d39661dee4bd48cf3652459591c5300b685335dde79c5e77023bba2fc5aec436f05e23966f97b097f7f9a5b985fb7b72dd2623c3a2df694b55298b6f4c4517d119d3782d8ee644c7206678e06ec4c6eb0dc7f5c89f633cf8f77310e5e5fb8163cf6ca720a29aec1bd22abeb9a1af0fbb009c3ad7579269e747bb400f66b93e3970bae417580deb5956849314e0db1a340f78e6a5a56bb763b98dbca504825d2be2232792ff659e62ca13f2839a15bebc403305a43092dd03d2cbb3e9acc68bdf1397c48f8675de11b9230401ee97ce5c2a9bd7c4eff3e5fba7ce37f6e4ce78669fb6d3eebb9af701f8e288db98a83d4c3fdc5d816fa12693f62fe75324d5e01de459ee5f53dab714d1cf6e6da6c49f04b78c2f5dc72adcaf1a51bf85da90b5a8afdbeceb788e547893a4c284d0699a9f48c866fad8c347ecfc1a37c01f5e6fd19cbfc391d1cf89d291a320a20547a52bdb015674c9913d38f61b673573a2570f1cdf3561309434252d867c86974e945fa703e74bcbf2431a2b55bb6b4e957af3a6acd71b8e55e17f29e5654e50dc5f137", "hashcat"}, // hashcat_ripemd160_twofish.tc
{"truecrypt_RIPEMD_160$9450ade46ed2f40e1579c844dbe2c81298cd9dfd911d6c6e44eac4fb919198ce02f6ecbc70bb6fdddce4dcb834b6356b59fa974d5e30e32e4a53e552def9268a03f9bad1cd2d7ead53a9db15f08bb26b8bb3cda95976f73721b0b19e64f9fac2c9e3710c0e88e0a04ef112ab6b2190578873dfdaddd2f146eeb8378185b7f5ac8c9488b8cf2f794330a66b3f37614b702f3fe471bed923cba59fd3845ddecce1da8557f06636f3e9ab5e87d71e2bfa61bd9bc29ee9d3ea27fe84aa8cd7f0b71069a8aaf64b0566ef126c9aec1d6380010cdf1e9da7982b99552503108286bd26ebb2638ca3f594277233b3c1c75361f9c2df448d247dc62c0050cbea7427a48d4bcfc78f227cc38c4b3bdd61c7538de91f6c20728e8af42eb2d92a7b1b0de8469ccb2fadbb60dc4d23b6707482c95b4b4f68c7d7037f52b9a16f80fe643541b11f756c8c3d9a3f231898d71b326455e3719863a78fed5264ea71c6223b58eeebfb2b0ea1c831557e9efe4bd684694a4e79f565fb2615b705bad692314a0519943a718b253bf920453d36382eb04f5adf4cf3e8f471934bd9a39997d8a2418e7320ac32a6470637f22b66b9e72e3d43f26cd5083ac6fb80d20c799dcc01127ba19659b5188644df41071b30de743df3c9670bb86e37f1751c8c02d21c9eda60c2653293b7d3f9480fc13d46d5d89c8737d541c385fea823a147c96a129047e127", "hashcat"}, // hashcat_ripemd160_serpent.tc
{NULL}
};
static struct fmt_tests tests_ripemd160boot[] = {
{"truecrypt_RIPEMD_160_BOOT$2b5da9924119fde5270f712ba3c3e4974460416e8465f222149499908c2fca0a4753b581f26625d11c4d3f49bdeb1c95bc3e17629d7e19ffb66175e5feab90a4fd670194f95d578266f3f54e61b82dc00efc2bb4438e19c3f6d7a92825a7625d88ec6286ab4e1761749edc83dad4340fd167544f09913fd6b03775013ff232fc4dad6f726ef82ad4bd1c5227a7796d7db35a912beeda5b0cdd798bc34d3ac24403c87dc672a983687dd64f920c991840a56105a6311797eed9976014909700366420673f6455242c71151ac75903a353538ec24b4feb967e2b46886395cf3e934e83a6a58ef2c0180273a0c33ba2bd870b1d84afb03d5558dc17bc7fb586404ad9a7e506ed859540110c6ad73f0f1d2be47829bc666e1838ec3f1dc1f610206241ce07fbf2542ecef9348b37aa460815794ca582709697cbf0c90c3dae4cb9dd97b29d3c7d82bd8d0c81d708e74c7007468c6c55a40fd4f803a4f5a75818d7da0d1ef333b8622e7de516fa62a6fa2b8d6d5d23653dfcedffec771456ee204e5c85ee88defbe195462fbe8ce0e2a5a455dab66478b877ec37dfa66f19ab5201c56cd707ba7bee1b10360965d3868c1fdf91dda124b1b0994fee75848083d19369735905bd2864b496c6e35ecf96f6dd4728570a45746bcf8d7d0ec0b9b0b112b28fdc53efcfa7d0558c132cd683a742d62b34304d9f991029c8aedc3d8767da8c", "hashcat"}, // hashcat_ripemd160_aes_boot.tc
{"truecrypt_RIPEMD_160_BOOT$3f6e7171e8c7d0f6ba03defcfbdef5de8d43984b21c3422bbce2357e1434334ac8af920e736ba4006094ad426524ceb491248a381ea154c37889a916f40c4823b4c68bcfbf8548b93830411a8b27746c6dd99c2a1ed0920947139a54fcf6315965730e85f9b8ced69a7f1c4336c63a351606ea577b8a5caa6b86cdac846534ba62735170350ad89c1f355165690439c4d0d8f6c6b2d0ef5f68958a10edac4b553d070f27e34291141b85e4502ae12721a8e7cb8bb1e8b8bfd57e4a92ab5364d4bb3415cfb1d12620ec17a6de1b050df122c03210b723fffc477c4d5b65319a269ae9dc18bd2fa55d2bc00b23b3824ddbfa7941fdee4a99afda93bb34bd7b613a9f2db7ec5f2947dfadf8fec021e9cb6d5acd2d23bf86f9fb699eb7faed555ff32be301f0ac35020221528808f1c00c95b14b791bf678dc234a339ac0c7c1c8131caecfabd2778e26503a967a592b5f36173e763b2d7db7f826a36368e7e88eb8c7d587b6a1eae544800f499ff0b4937cfa07861331e699265e61081a612c0075ff95f77ea9be731cc2aa285024548c506d2e89acd07f39250238e2f4e319b789ca7d4163482211839787c9cdd3189c7a4525d4e2e38d7f5affb2ed5d7cf3de9ea14c50f51a2f307248758e6eca8179cb8e2d7397f6b1818a9c25131b7fac5c12efc5e952e8e04af9fb3432548378b4a20471fad934cd7c9983af7830e2f6a202", "hashcat"}, // hashcat_ripemd160_twofish_boot.tc
{"truecrypt_RIPEMD_160_BOOT$e4cad3bc157ded7d28b8111f6967617a1e920bc4238fd2293b0f6f943d692e97b9b4ab9f75df7010cdbd1d20cf9775b85aad6101ac2ca499c7d592ce8c47fb16c01146d61f54457b15d9a70683713437671fd8ca0b83175058072d34f5737b59caf8b63f6244cb95d748cea3589111d20b73d2750d7fd18a791dd85aa96a6c65d5387f08c80f735824833314ca98ac5d1a52e055fb2bac663457b248aec33773fbb0b73586d62d2e44991ebf68af4fab38954385d9addd88e03f819ccf959d502888a70edc01d0a924f957fd3f8db037f0c65068ab46ccd00ac12df649c9735e20bd3d5411d0285918926a23a6d9b8dff315d245b6156fe49ef5ebcc35b98c43568cdf4585e9d96d9b3e9d252df2f34528a9bc9739ca4c906bbcea34f6704cf417493067839183c85c6553ab1ecbb552b85434c892154f0fe341f7b7f21a4052619b77db2be345ddd01b6ecc72eb424a78fc10379785fba362a2a86f1a9660fc0bb5408516571c103983ea5d43f7fae5688a59ef1ac913c96012c66dbb67f86aaff82fe52dc4ac3ba4c7737efbfae84de2138bc41650bb62fa41232f5e7a51f3a07316234d1ebe2cfee38380902aae2d338653bff0fd97f83d971c9193870734a2b0f16ac52aaa4c4f1c29fc7e6619cacbf72fed79f3ad3db214b0e32a56c5283865c3af8d8c17f76e608309ab266abd4abad72d0cc96e4c6453a4633ee86aa9", "hashcat"}, // hashcat_ripemd160_serpent_boot.tc
{NULL},
};
static struct fmt_tests tests_sha512[] = {
{"truecrypt_SHA_512$aa582afe64197a3cfd4faf7697673e5e14414369da3f716400414f63f75447da7d3abdc65a25ea511b1772d67370d6c349d8000de66d65861403093fecfb85719e1d46158d24324e5a2c0ee598214b1b2e7eac761dbde8cb85bcb33f293df7f30c9e44a3fa97bf1c70e9986677855873fa2435d9154ccaed8f28d68f16b10adcce7032d7c1742d322739d02c05457859abdaa176faa95c674d2a1092c30832dd2afd9a319599b4d1db92ffe6e48b3b29e566d5c51af091839699f5ad1715730fef24e94e39a6f40770b8320e30bf972d810b588af88ce3450337adbec0a10255b20230bcfca93aa5a0a6592cd6038312181c0792c59ec9e5d95a6216497d39ae28131869b89368e82371718970bf9750a7114c83d87b1b0cd16b6e8d41c4925d15ec26107e92847ec1bb73363ca10f3ad62afa8b0f95ff13cdbe217a1e8a74508ef439ed2140b26d5538b8d011a0d1e469f2a6962e56964adc75b90d9c6a16e88ad0adb59a337f8abb3f9d76f7f9acad22853e9dbbce13a4f686c6a802243b0901972af3c6928511609ac7b957b352452c4347acd563a72faa86a46522942fdc57f32d48c5148a2bb0bc2c3dbc9851385f816f2ece958957082c0a8fe69f647be675d87fcb8244912abc277a3242ee17e1d522f85598417559cb3a9f60b755e5b613069cb54c05a4c5d2fbd3ca6ba793320aeb0e109f8b21852daf2d9ed74dd9", "password"},
{"truecrypt_SHA_512$73f6b08614dc4ffbd77d27a0815b0700d6b612f573ccd6c8937e8d154321e3c1c1c67dd348d4d3bc8304e94a3a6ec0c672de8396a9a6b26b12393195b7daa4225a9d3a134229be011f8179791bb00c31b5c132c8dbad5a6f8738487477c409b3c32d90b07be8d7a3a9faa95d37ab6faccc459d47f029e25adcea48cee83eaa35b7acc3f849717000421d92ac46e6f16ec3dccacd3ffae76a48280977d2a6727027d9d6ff9c4c98405359ee382f6dd1eca0d7007cbe804b81485c1085e74b58d3eb1e3c7ebdc1e1ab1384e4440ab6ca7beed7e0ef7d1e0da5ffc3cd89f7b6ac8a9257ee369d397ac1e112f75382ddbe6f7317ec20c46cb7b2111d0d91570e90b4c01a0b8205fcdf4d0cadcf4a067b8f285a541f1d649894fb3ade29a2ee0575524455d489c299dde215bea3254f7d43aa4e4011a39bdb6e7473bc29f588e659fdbf065cc4a336ba42f2b6c07479cf3e544978150fb013da7db22afcb4f8384e39e2edfa30a4cbe5e84a07c54ba66663bb9284836cc5a8ba7489d3f7f92aec6d9f4e264c90c2af6181082bd273197bc42c325cb1de31006dd55425e3f210d2ddd7973978eec865d3226bb1e30a9897146d90d79a73070e87f0182981ea85f15f948ae1958af7704fabecd6f07e20be70be9f9c38a5c5e5c8b17be648f011b2c40f62d6ac51de932add5bdb47bb428fd510b004a7aa79321b03ed7aa202be439fbf", "password" },
{"truecrypt_SHA_512$cfd9e5757da139b32d117cd60f86f649400615dc218981106dfadd44598599a7ec0ace42de61506fe8d81b5c885861cdb26e0c38cb9adfcff27ba88872220ccd0914d4fa44bab5a708fe6864e0f665ac71d87e7e97b3724d610cf1f6ec09fa99da40126f63868654fed3381eaa8176f689e8e292c3cb68e43601d5804bc2e19d86722c21d42204e158b26b720e7b8f7580edce15469195dd7ed711b0fcb6c8abc253d0fd93cc784d5279de527fbdcfb357780635a5c363b773b55957d7efb472f6e6012489a9f0d225573446e5251cfb277a1365eed787e0da52f02d835667d74cc41fa4002cc35ad1ce276fbf9d73d6553ac0f8ab6961901d292a66df814a2cbda1b41f29aeec88ed15e7d37fe84ac5306b5a1b8d2e1f2c132e5c7d40ca7bb76d4ff87980ca4d75eaac5066b3ed50b53259554b9f922f7cee8e91847359d06e448da02cbeeecc78ca9bee2899a33dfa04a478ca131d33c64d6de5f81b219f11bed6ff3c0d56f26b3a27c79e7c55b6f76567a612166ce71028e3d3ae7e5abd25faec5e2e9dc30719baa2c138e26d6f8e3799a72b5e7b1c2a07c12cea452073b72f6e429bb17dd23fe3934c9e406bb4060083f92aa100c2e82ca40664f65c02cbc800c5696659f8df84db17edb92de5d4f1ca9e5fe71844e1e8c4f8b19ce7362fb3ca5467bf65122067c53f011648a6663894b315e6c5c635bec5bd39da028041", "123" },
/* test vector with single keyfile, with data "1234567" */
{NULL}
};
static struct fmt_tests tests_whirlpool[] = {
{"truecrypt_WHIRLPOOL$5724ba89229d705010ec56af416b16155682a0cab9cf48ac5a5fdd2086c9a251ae4bbea6cfb8464321a789852f7812095b0e0c4c4f9c6d14ba7beedaf3484b375ac7bc97b43c3e74bf1a0c259b7ac8725d990d2ff31935ca3443f2ce8df59de86515da3e0f53f728882b71c5cc704df0c87c282a7413db446e9a2e516a144311dd25092eb0a2c5df0240d899708289fc7141abd8538fa5791d9f96c39129cce9fe8a6e58e84364e2f4acc32274147431cb2d2480b1b54bffee485acee0925852b8a6ee71d275f028b92e540be595448e5f1d78560a3b8ad209962dd5981d7ca98db9a678a588a9296157d44502cd78f9e32f022dddc9bc8111b5704ee39a9b56d30b89898ae340e90f2e6c73be6ac64de97e32fc2eed0b66dcd5c1553eeab3950cf851624a5a4439435a6fd5717fda6d5f939f4a902321341964c16bda8975752ba150fb9d858d8eaff2a2086cb50d30abff741ee20223b4223b1783f0ed537a609a081afed952395ef0b5de6883db66cbb5a8bac70f2f757c7b6e6bb5d863672820f0d3d61b262b2b6c2ca0dc8e7137851aa450da1c1d915e005bff0e849a89bf67693ef97f5c17bf8d07a18c562dc783274f9ec580f9519a6dd1429b66160ddb04549506ad616dd0695da144fa2ad270eac7163983e9036f1bde3c7634b8a246b8dcd518ce3e12b881c838fbce59a0cfdffa3b21447e3f28124f63549c3962", "password" },
{"truecrypt_WHIRLPOOL$0650595770851981d70b088ff6ef4bf90573e08d03c8cac8b2dfded22e1653f5c45103758c68be344fdccae42b4683087da083a3841b92fb79856798eaee793c04cd95ae556d9616684da17e47bd2f775d8128f94b80b781e4cab4921b12c620721cf719ca72d3997cea829fd29b429282b597d5719c13423cdf7bd717fa12a56b8eddcf7b1ad2796c4ad078ab3a9bd944a694aa4b0078ed160440dd3db13dd1d04a7aaaa4dc016a95bd1cfafcd833ae933c627bf5512ae55c76069af7190823dba0133d6fe02e4421d3684ff2a2493da990a3cc5eed40a9e8c48c7a89a2f47030d45c324a3d78b941e772e24b285af6739ae1f5953ff838edaa69e79939f55d0fe00cd0e3a20a46db3a232009eabc800711342f7e580ba909f16c2039d4900fd4025845a385641a6037ceb6420fe7d37868e8c06e6146eddec9e6cb97e71048da5fa5898dac08152516ea1c6729e85d31596cd226aa218ce693989efb9fa8b05404bcc2debbc75c429a03fe31bfc49f10d595b898436ff6b02fc01d745b91280f26ae94a4969ce7f86c12e6b562c7b5377e3fb3247a8cda11a930c2a9e80f24966925de01afad5987ebee9c3de1d41667c6dc35cebbbc963f263c700d06a647ab7020385e3a7e30406f3e7a9b3142d39e0439c98948134d11166b621dfd3ea9d3a84d985b2aa7732b7ad9beba44334dd86292b0c94befb2cb8aa72a823129cb", "123" },
{NULL}
};
static struct fmt_tests tests_all[] = {
{"truecrypt_SHA_512$aa582afe64197a3cfd4faf7697673e5e14414369da3f716400414f63f75447da7d3abdc65a25ea511b1772d67370d6c349d8000de66d65861403093fecfb85719e1d46158d24324e5a2c0ee598214b1b2e7eac761dbde8cb85bcb33f293df7f30c9e44a3fa97bf1c70e9986677855873fa2435d9154ccaed8f28d68f16b10adcce7032d7c1742d322739d02c05457859abdaa176faa95c674d2a1092c30832dd2afd9a319599b4d1db92ffe6e48b3b29e566d5c51af091839699f5ad1715730fef24e94e39a6f40770b8320e30bf972d810b588af88ce3450337adbec0a10255b20230bcfca93aa5a0a6592cd6038312181c0792c59ec9e5d95a6216497d39ae28131869b89368e82371718970bf9750a7114c83d87b1b0cd16b6e8d41c4925d15ec26107e92847ec1bb73363ca10f3ad62afa8b0f95ff13cdbe217a1e8a74508ef439ed2140b26d5538b8d011a0d1e469f2a6962e56964adc75b90d9c6a16e88ad0adb59a337f8abb3f9d76f7f9acad22853e9dbbce13a4f686c6a802243b0901972af3c6928511609ac7b957b352452c4347acd563a72faa86a46522942fdc57f32d48c5148a2bb0bc2c3dbc9851385f816f2ece958957082c0a8fe69f647be675d87fcb8244912abc277a3242ee17e1d522f85598417559cb3a9f60b755e5b613069cb54c05a4c5d2fbd3ca6ba793320aeb0e109f8b21852daf2d9ed74dd9", "password"},
{"truecrypt_SHA_512$73f6b08614dc4ffbd77d27a0815b0700d6b612f573ccd6c8937e8d154321e3c1c1c67dd348d4d3bc8304e94a3a6ec0c672de8396a9a6b26b12393195b7daa4225a9d3a134229be011f8179791bb00c31b5c132c8dbad5a6f8738487477c409b3c32d90b07be8d7a3a9faa95d37ab6faccc459d47f029e25adcea48cee83eaa35b7acc3f849717000421d92ac46e6f16ec3dccacd3ffae76a48280977d2a6727027d9d6ff9c4c98405359ee382f6dd1eca0d7007cbe804b81485c1085e74b58d3eb1e3c7ebdc1e1ab1384e4440ab6ca7beed7e0ef7d1e0da5ffc3cd89f7b6ac8a9257ee369d397ac1e112f75382ddbe6f7317ec20c46cb7b2111d0d91570e90b4c01a0b8205fcdf4d0cadcf4a067b8f285a541f1d649894fb3ade29a2ee0575524455d489c299dde215bea3254f7d43aa4e4011a39bdb6e7473bc29f588e659fdbf065cc4a336ba42f2b6c07479cf3e544978150fb013da7db22afcb4f8384e39e2edfa30a4cbe5e84a07c54ba66663bb9284836cc5a8ba7489d3f7f92aec6d9f4e264c90c2af6181082bd273197bc42c325cb1de31006dd55425e3f210d2ddd7973978eec865d3226bb1e30a9897146d90d79a73070e87f0182981ea85f15f948ae1958af7704fabecd6f07e20be70be9f9c38a5c5e5c8b17be648f011b2c40f62d6ac51de932add5bdb47bb428fd510b004a7aa79321b03ed7aa202be439fbf", "password" },
{TAG_SHA512"cfd9e5757da139b32d117cd60f86f649400615dc218981106dfadd44598599a7ec0ace42de61506fe8d81b5c885861cdb26e0c38cb9adfcff27ba88872220ccd0914d4fa44bab5a708fe6864e0f665ac71d87e7e97b3724d610cf1f6ec09fa99da40126f63868654fed3381eaa8176f689e8e292c3cb68e43601d5804bc2e19d86722c21d42204e158b26b720e7b8f7580edce15469195dd7ed711b0fcb6c8abc253d0fd93cc784d5279de527fbdcfb357780635a5c363b773b55957d7efb472f6e6012489a9f0d225573446e5251cfb277a1365eed787e0da52f02d835667d74cc41fa4002cc35ad1ce276fbf9d73d6553ac0f8ab6961901d292a66df814a2cbda1b41f29aeec88ed15e7d37fe84ac5306b5a1b8d2e1f2c132e5c7d40ca7bb76d4ff87980ca4d75eaac5066b3ed50b53259554b9f922f7cee8e91847359d06e448da02cbeeecc78ca9bee2899a33dfa04a478ca131d33c64d6de5f81b219f11bed6ff3c0d56f26b3a27c79e7c55b6f76567a612166ce71028e3d3ae7e5abd25faec5e2e9dc30719baa2c138e26d6f8e3799a72b5e7b1c2a07c12cea452073b72f6e429bb17dd23fe3934c9e406bb4060083f92aa100c2e82ca40664f65c02cbc800c5696659f8df84db17edb92de5d4f1ca9e5fe71844e1e8c4f8b19ce7362fb3ca5467bf65122067c53f011648a6663894b315e6c5c635bec5bd39da028041", "123" },
{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" },
{TAG_RIPEMD160"6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" },
{"truecrypt_WHIRLPOOL$5724ba89229d705010ec56af416b16155682a0cab9cf48ac5a5fdd2086c9a251ae4bbea6cfb8464321a789852f7812095b0e0c4c4f9c6d14ba7beedaf3484b375ac7bc97b43c3e74bf1a0c259b7ac8725d990d2ff31935ca3443f2ce8df59de86515da3e0f53f728882b71c5cc704df0c87c282a7413db446e9a2e516a144311dd25092eb0a2c5df0240d899708289fc7141abd8538fa5791d9f96c39129cce9fe8a6e58e84364e2f4acc32274147431cb2d2480b1b54bffee485acee0925852b8a6ee71d275f028b92e540be595448e5f1d78560a3b8ad209962dd5981d7ca98db9a678a588a9296157d44502cd78f9e32f022dddc9bc8111b5704ee39a9b56d30b89898ae340e90f2e6c73be6ac64de97e32fc2eed0b66dcd5c1553eeab3950cf851624a5a4439435a6fd5717fda6d5f939f4a902321341964c16bda8975752ba150fb9d858d8eaff2a2086cb50d30abff741ee20223b4223b1783f0ed537a609a081afed952395ef0b5de6883db66cbb5a8bac70f2f757c7b6e6bb5d863672820f0d3d61b262b2b6c2ca0dc8e7137851aa450da1c1d915e005bff0e849a89bf67693ef97f5c17bf8d07a18c562dc783274f9ec580f9519a6dd1429b66160ddb04549506ad616dd0695da144fa2ad270eac7163983e9036f1bde3c7634b8a246b8dcd518ce3e12b881c838fbce59a0cfdffa3b21447e3f28124f63549c3962", "password" },
{TAG_WHIRLPOOL"0650595770851981d70b088ff6ef4bf90573e08d03c8cac8b2dfded22e1653f5c45103758c68be344fdccae42b4683087da083a3841b92fb79856798eaee793c04cd95ae556d9616684da17e47bd2f775d8128f94b80b781e4cab4921b12c620721cf719ca72d3997cea829fd29b429282b597d5719c13423cdf7bd717fa12a56b8eddcf7b1ad2796c4ad078ab3a9bd944a694aa4b0078ed160440dd3db13dd1d04a7aaaa4dc016a95bd1cfafcd833ae933c627bf5512ae55c76069af7190823dba0133d6fe02e4421d3684ff2a2493da990a3cc5eed40a9e8c48c7a89a2f47030d45c324a3d78b941e772e24b285af6739ae1f5953ff838edaa69e79939f55d0fe00cd0e3a20a46db3a232009eabc800711342f7e580ba909f16c2039d4900fd4025845a385641a6037ceb6420fe7d37868e8c06e6146eddec9e6cb97e71048da5fa5898dac08152516ea1c6729e85d31596cd226aa218ce693989efb9fa8b05404bcc2debbc75c429a03fe31bfc49f10d595b898436ff6b02fc01d745b91280f26ae94a4969ce7f86c12e6b562c7b5377e3fb3247a8cda11a930c2a9e80f24966925de01afad5987ebee9c3de1d41667c6dc35cebbbc963f263c700d06a647ab7020385e3a7e30406f3e7a9b3142d39e0439c98948134d11166b621dfd3ea9d3a84d985b2aa7732b7ad9beba44334dd86292b0c94befb2cb8aa72a823129cb", "123" },
{NULL}
};
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
key_buffer = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*key_buffer));
first_block_dec = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*first_block_dec));
keyfiles_data = mem_calloc(MAX_KEYFILES,
sizeof(*keyfiles_data));
keyfiles_length = mem_calloc(MAX_KEYFILES,
sizeof(int));
cracked = mem_calloc(sizeof(*cracked),
self->params.max_keys_per_crypt);
Twofish_initialise();
}
static void done(void)
{
MEM_FREE(first_block_dec);
MEM_FREE(key_buffer);
MEM_FREE(keyfiles_data);
MEM_FREE(keyfiles_length);
MEM_FREE(cracked);
}
static int valid(char* ciphertext, int pos)
{
unsigned int i;
char *p, *q;
int nkeyfiles = -1;
p = ciphertext + pos;
q = strchr(p, '$');
if (!q) { /* no keyfiles */
if (pos + 512 * 2 != strlen(ciphertext))
return 0;
} else {
if (q - p != 512 * 2)
return 0;
/* check keyfile(s) */
p = q + 1;
nkeyfiles = atoi(p);
if (nkeyfiles > MAX_KEYFILES || nkeyfiles < 1)
return 0;
}
// Reject anything that is not a hexadecimal character
for (i = 0; i < 512 * 2; i++) {
if (atoi16l[ARCH_INDEX((ciphertext+pos)[i])] == 0x7F)
return 0;
}
return 1;
}
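/* Reading aid (not part of the original): a valid line is a format tag
 * followed by 1024 hex digits (512 bytes: 64-byte salt + 448-byte encrypted
 * header), optionally followed by "$<nkeyfiles>$<path>[$<path>...]" naming
 * the keyfile(s). */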
static int valid_ripemd160(char* ciphertext, struct fmt_main *self)
{
// Not a supported hash type
if (strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN))
return 0;
return valid(ciphertext, TAG_RIPEMD160_LEN);
}
static int valid_ripemd160boot(char* ciphertext, struct fmt_main *self)
{
if (strncmp(ciphertext, TAG_RIPEMD160BOOT, TAG_RIPEMD160BOOT_LEN))
return 0;
return valid(ciphertext, TAG_RIPEMD160BOOT_LEN);
}
static int valid_sha512(char* ciphertext, struct fmt_main *self)
{
// Not a supported hash type
if (strncmp(ciphertext, TAG_SHA512, TAG_SHA512_LEN))
return 0;
return valid(ciphertext, TAG_SHA512_LEN);
}
static int valid_whirlpool(char* ciphertext, struct fmt_main *self)
{
// Not a supported hash type
if (strncmp(ciphertext, TAG_WHIRLPOOL, TAG_WHIRLPOOL_LEN))
return 0;
return valid(ciphertext, TAG_WHIRLPOOL_LEN);
}
static int valid_truecrypt(char *ciphertext, struct fmt_main *self) {
if (valid_sha512(ciphertext, self) ||
valid_ripemd160(ciphertext, self) ||
valid_ripemd160boot(ciphertext, self) ||
valid_whirlpool(ciphertext, self))
return 1;
return 0;
}
static void set_salt(void *salt)
{
psalt = salt;
}
static void* get_salt(char *ciphertext)
{
static char buf[sizeof(struct cust_salt)+4];
struct cust_salt *s = (struct cust_salt *)mem_align(buf, 4);
unsigned int i;
char tpath[PATH_BUFFER_SIZE] = {0};
char *p, *q;
int idx;
FILE *fp;
size_t sz;
memset(s, 0, sizeof(struct cust_salt));
s->num_iterations = 1000;
s->loop_inc = 1;
if (!strncmp(ciphertext, TAG_WHIRLPOOL, TAG_WHIRLPOOL_LEN)) {
ciphertext += TAG_WHIRLPOOL_LEN;
s->hash_type = IS_WHIRLPOOL;
} else if (!strncmp(ciphertext, TAG_SHA512, TAG_SHA512_LEN)) {
ciphertext += TAG_SHA512_LEN;
s->hash_type = IS_SHA512;
#if SSE_GROUP_SZ_SHA512
s->loop_inc = SSE_GROUP_SZ_SHA512;
#endif
} else if (!strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN)) {
ciphertext += TAG_RIPEMD160_LEN;
s->hash_type = IS_RIPEMD160;
s->num_iterations = 2000;
} else if (!strncmp(ciphertext, TAG_RIPEMD160BOOT, TAG_RIPEMD160BOOT_LEN)) {
ciphertext += TAG_RIPEMD160BOOT_LEN;
s->hash_type = IS_RIPEMD160BOOT;
s->num_iterations = 1000;
} else {
// should never get here! valid() should catch all lines that do not have the tags.
fprintf(stderr, "Error, unknown type in truecrypt::get_salt(), [%s]\n", ciphertext);
error();
}
// Convert the hexadecimal salt (and encrypted header) to binary
for (i = 0; i < 64; i++)
s->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
for (; i < 512; i++)
s->bin[i-64] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])];
p = ciphertext;
q = strchr(p, '$');
if (!q) /* no keyfiles */
return s;
// process keyfile(s)
p = q + 1;
s->nkeyfiles = atoi(p);
for (idx = 0; idx < s->nkeyfiles; idx++) {
p = strchr(p, '$') + 1; // at first filename
q = strchr(p, '$');
if (!q) { // last file
memset(tpath, 0, sizeof(tpath));
strncpy(tpath, p, sizeof(tpath) - 1); // leave room for the terminating NUL
} else {
memset(tpath, 0, sizeof(tpath));
strncpy(tpath, p, q-p);
}
/* read this into keyfiles_data[idx] */
fp = fopen(tpath, "rb");
if (!fp)
pexit("fopen %s", p);
if (fseek(fp, 0L, SEEK_END) == -1)
pexit("fseek");
sz = ftell(fp);
if (fseek(fp, 0L, SEEK_SET) == -1)
pexit("fseek");
if (fread(keyfiles_data[idx], 1, sz, fp) != sz)
pexit("fread");
keyfiles_length[idx] = sz;
fclose(fp);
}
return s;
}
static int apply_keyfiles(unsigned char *pass, size_t pass_memsz, int nkeyfiles)
{
int pl, k;
unsigned char *kpool;
unsigned char *kdata;
int kpool_idx;
size_t i, kdata_sz;
uint32_t crc;
if (pass_memsz < MAX_PASSSZ) {
error();
}
pl = strlen((char *)pass);
memset(pass+pl, 0, MAX_PASSSZ-pl);
if ((kpool = mem_calloc(1, KPOOL_SZ)) == NULL) {
error();
}
for (k = 0; k < nkeyfiles; k++) {
kpool_idx = 0;
kdata_sz = keyfiles_length[k];
kdata = keyfiles_data[k];
crc = ~0U;
for (i = 0; i < kdata_sz; i++) {
crc = jtr_crc32(crc, kdata[i]);
kpool[kpool_idx++] += (unsigned char)(crc >> 24);
kpool[kpool_idx++] += (unsigned char)(crc >> 16);
kpool[kpool_idx++] += (unsigned char)(crc >> 8);
kpool[kpool_idx++] += (unsigned char)(crc);
/* Wrap around */
if (kpool_idx == KPOOL_SZ)
kpool_idx = 0;
}
}
/* Apply keyfile pool to passphrase */
for (i = 0; i < KPOOL_SZ; i++)
pass[i] += kpool[i];
MEM_FREE(kpool);
return 0;
}
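/* Reading aid (not part of the original): apply_keyfiles() implements
 * tc-play's keyfile pool (see the URL above): every keyfile byte advances
 * a running CRC-32, the four CRC bytes are added into a 64-byte pool that
 * wraps around, and the pool is finally added byte-wise onto the
 * zero-padded passphrase. */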
// Compare a big-endian crc32 byte string against a computed crc32, in a way
// that is safe on CPUs that cannot do unaligned loads.
// This function is not really speed critical.
static int cmp_crc32s(unsigned char *given_crc32, CRC32_t comp_crc32) {
return given_crc32[0] == ((comp_crc32>>24)&0xFF) &&
given_crc32[1] == ((comp_crc32>>16)&0xFF) &&
given_crc32[2] == ((comp_crc32>> 8)&0xFF) &&
given_crc32[3] == ((comp_crc32>> 0)&0xFF);
}
static int decrypt_and_verify(unsigned char *key, int algorithm)
{
unsigned char decr_header[512-64];
CRC32_t check_sum;
// The 512-byte header starts with 64 bytes of unencrypted salt; the
// remaining 448 bytes are encrypted. Decrypt them and check 3 items.
switch (algorithm) {
case 0:
XTS_decrypt(key, decr_header, psalt->bin, 512-64, 256, 0);
break;
case 1:
XTS_decrypt(key, decr_header, psalt->bin, 512-64, 256, 1);
// Twofish_XTS_decrypt(key, decr_header, psalt->bin, 512-64, 256);
break;
case 2:
XTS_decrypt(key, decr_header, psalt->bin, 512-64, 256, 2);
// Serpent_XTS_decrypt(key, decr_header, psalt->bin, 512-64, 256);
break;
}
// First item we look for is a constant string 'TRUE' in the first 4 bytes.
if (memcmp(decr_header, "TRUE", 4))
return 0;
// Now we look for 2 crc values. At offset 8 is the first. This provided
// CRC should be the crc32 of the last 256 bytes of the buffer.
CRC32_Init(&check_sum);
CRC32_Update(&check_sum, &decr_header[256-64], 256);
if (!cmp_crc32s(&decr_header[8], ~check_sum))
return 0;
// Now we compute the crc32 of the first part of the buffer, stopping 4
// bytes short of the start of that last 256 bytes (i.e. 188 bytes in
// total). Immediately after the region we hash sits the 4-byte block we
// are given as the expected crc32 (those 4 bytes are, of course, not part
// of the crc32 itself). That provided crc32 is the only 4 bytes of the
// header not covered by 'some' CRC32 computation.
CRC32_Init(&check_sum);
CRC32_Update(&check_sum, decr_header, 256-64-4);
if (!cmp_crc32s(&decr_header[256-64-4], ~check_sum))
return 0;
// Passed 96 bits of tests. This is the right password!
return 1;
}
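/* Reading aid (not part of the original): offsets inside the 448 decrypted
 * header bytes checked above:
 *   [0..3]     ASCII signature "TRUE"
 *   [8..11]    crc32 of the last 256 bytes (offsets 192..447)
 *   [188..191] crc32 of bytes 0..187
 * Matching all three is the "96 bits of tests" noted in the function
 * above. */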
static int crypt_all(int *pcount, struct db_salt *salt)
{
int i;
const int count = *pcount;
memset(cracked, 0, sizeof(cracked[0]) * count);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i+=psalt->loop_inc)
{
unsigned char key[64];
#if SSE_GROUP_SZ_SHA512
unsigned char Keys[SSE_GROUP_SZ_SHA512][64];
#endif
int j;
int ksz = strlen((char *)key_buffer[i]);
#if SSE_GROUP_SZ_SHA512
if (psalt->hash_type != IS_SHA512)
#endif
{
strncpy((char*)key, (char*)key_buffer[i], 64);
/* process keyfile(s) */
if (psalt->nkeyfiles) {
apply_keyfiles(key, 64, psalt->nkeyfiles);
ksz = 64;
}
}
#if SSE_GROUP_SZ_SHA512
if (psalt->hash_type == IS_SHA512) {
int lens[SSE_GROUP_SZ_SHA512];
unsigned char *pin[SSE_GROUP_SZ_SHA512];
union {
unsigned char *pout[SSE_GROUP_SZ_SHA512];
unsigned char *poutc;
} x;
for (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {
lens[j] = strlen((char*)(key_buffer[i+j]));
strncpy((char*)Keys[j], (char*)key_buffer[i+j], 64);
/* process keyfile(s) */
if (psalt->nkeyfiles) {
apply_keyfiles(Keys[j], 64, psalt->nkeyfiles);
lens[j] = 64;
}
pin[j] = key_buffer[i+j];
x.pout[j] = Keys[j];
}
pbkdf2_sha512_sse((const unsigned char **)pin, lens, psalt->salt, 64, psalt->num_iterations, &(x.poutc), sizeof(key), 0);
}
#else
if (psalt->hash_type == IS_SHA512) {
pbkdf2_sha512((const unsigned char*)key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
}
#endif
else if (psalt->hash_type == IS_RIPEMD160 || psalt->hash_type == IS_RIPEMD160BOOT)
pbkdf2_ripemd160((const unsigned char*)key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
else
pbkdf2_whirlpool((const unsigned char*)key, ksz, psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
for (j = 0; j < psalt->loop_inc; ++j) {
#if SSE_GROUP_SZ_SHA512
if (psalt->hash_type == IS_SHA512)
memcpy(key, Keys[j], sizeof(key));
#endif
cracked[i+j] = 0;
if (decrypt_and_verify(key, 0)) // AES
cracked[i+j] = 1;
else {
if (decrypt_and_verify(key, 1)) // Twofish
cracked[i+j] = 1;
else {
if (decrypt_and_verify(key, 2)) // Serpent
cracked[i+j] = 1;
}
}
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int idx)
{
return 1;
}
static void set_key(char* key, int index)
{
strcpy((char*)(key_buffer[index]), key);
}
static char *get_key(int index)
{
return (char*)(key_buffer[index]);
}
static int salt_hash(void *salt)
{
unsigned v=0, i;
struct cust_salt *psalt = (struct cust_salt *)salt;
for (i = 0; i < 64; ++i) {
v *= 11;
v += psalt->salt[i];
}
return v & (SALT_HASH_SIZE - 1);
}
static unsigned int tc_hash_algorithm(void *salt)
{
return (unsigned int)((struct cust_salt*)salt)->hash_type;
}
struct fmt_main fmt_truecrypt = {
{
"tc_aes_xts", // FORMAT_LABEL
"TrueCrypt AES256_XTS", // FORMAT_NAME
#if SSE_GROUP_SZ_SHA512
"SHA512 " SHA512_ALGORITHM_NAME " /RIPEMD160/WHIRLPOOL",
#else
#if ARCH_BITS >= 64
"SHA512 64/" ARCH_BITS_STR " /RIPEMD160/WHIRLPOOL",
#else
"SHA512 32/" ARCH_BITS_STR " /RIPEMD160/WHIRLPOOL",
#endif
#endif
"", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
#if SSE_GROUP_SZ_SHA512
SSE_GROUP_SZ_SHA512,
SSE_GROUP_SZ_SHA512,
#else
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#endif
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{
"hash algorithm [1:SHA512 2:RIPEMD160 3:Whirlpool]",
},
{
TAG_WHIRLPOOL,
TAG_SHA512,
TAG_RIPEMD160
},
tests_all
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid_truecrypt,
fmt_default_split,
fmt_default_binary,
get_salt,
{
tc_hash_algorithm,
},
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_truecrypt_ripemd160 = {
{
"tc_ripemd160", // FORMAT_LABEL
"TrueCrypt AES256_XTS", // FORMAT_NAME
"RIPEMD160 32/" ARCH_BITS_STR, // ALGORITHM_NAME,
"", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ TAG_RIPEMD160 },
tests_ripemd160
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid_ripemd160,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_truecrypt_ripemd160boot = {
{
"tc_ripemd160boot", // FORMAT_LABEL
"TrueCrypt AES/Twofish/Serpent", // FORMAT_NAME
"RIPEMD160 32/" ARCH_BITS_STR, // ALGORITHM_NAME,
"", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ TAG_RIPEMD160BOOT },
tests_ripemd160boot
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid_ripemd160boot,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_truecrypt_sha512 = {
{
"tc_sha512", // FORMAT_LABEL
"TrueCrypt AES256_XTS", // FORMAT_NAME
#if SSE_GROUP_SZ_SHA512
"SHA512 " SHA512_ALGORITHM_NAME, // ALGORITHM_NAME,
#else
#if ARCH_BITS >= 64
"SHA512 64/" ARCH_BITS_STR,
#else
"SHA512 32/" ARCH_BITS_STR,
#endif
#endif
"", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
#if SSE_GROUP_SZ_SHA512
SSE_GROUP_SZ_SHA512,
SSE_GROUP_SZ_SHA512,
#else
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#endif
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ TAG_SHA512 },
tests_sha512
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid_sha512,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_truecrypt_whirlpool = {
{
"tc_whirlpool", // FORMAT_LABEL
"TrueCrypt AES256_XTS", // FORMAT_NAME
#if ARCH_BITS >= 64
"WHIRLPOOL 64/" ARCH_BITS_STR, // ALGORITHM_NAME,
#else
"WHIRLPOOL 32/" ARCH_BITS_STR, // ALGORITHM_NAME,
#endif
"", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ TAG_WHIRLPOOL },
tests_whirlpool
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid_whirlpool,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
jacobi-block-for.cuda.c | #include "hclib.h"
#ifdef __cplusplus
#include "hclib_cpp.h"
#include "hclib_system.h"
#ifdef __CUDACC__
#include "hclib_cuda.h"
#endif
#endif
# include "poisson.h"
/* Blocked version of SWEEP (the original labeled this the '#pragma omp
   task/taskwait version'; no such pragmas remain in this port). */
void sweep (int nx, int ny, double dx, double dy, double *f_,
int itold, int itnew, double *u_, double *unew_, int block_size)
{
int it;
int block_x, block_y;
if (block_size == 0)
block_size = nx;
int max_blocks_x = (nx / block_size);
int max_blocks_y = (ny / block_size);
for (it = itold + 1; it <= itnew; it++)
{
// Save the current estimate.
for (block_x = 0; block_x < max_blocks_x; block_x++)
for (block_y = 0; block_y < max_blocks_y; block_y++)
copy_block(nx, ny, block_x, block_y, u_, unew_, block_size);
for (block_x = 0; block_x < max_blocks_x; block_x++)
for (block_y = 0; block_y < max_blocks_y; block_y++)
compute_estimate(block_x, block_y, u_, unew_, f_, dx, dy,
nx, ny, block_size);
}
}
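/* Illustrative sketch (not part of this benchmark): a minimal 5-point
   Jacobi update of the kind compute_estimate is expected to perform on a
   block, written here for the whole grid with a right-hand side f. All
   names are local to this sketch. */
static void jacobi_update_full(int nx, int ny, double dx, double dy,
                               const double *u, const double *f, double *unew)
{
    int i, j;
    for (i = 1; i < nx - 1; i++)
        for (j = 1; j < ny - 1; j++)
            /* average of the four neighbors plus the scaled source term */
            unew[i*ny + j] = 0.25 * (u[(i-1)*ny + j] + u[(i+1)*ny + j]
                                   + u[i*ny + (j-1)] + u[i*ny + (j+1)]
                                   + f[i*ny + j] * dx * dy);
}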
|
serial_tree_learner.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
using namespace json11;
namespace LightGBM {
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
explicit SerialTreeLearner(const Config* config);
~SerialTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data) override;
void ResetConfig(const Config* config) override;
Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
const Json& forced_split_json) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
const score_t* gradients, const score_t* hessians) override;
void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
data_partition_->SetUsedDataIndices(used_indices, num_data);
}
void AddPredictionToScore(const Tree* tree, double* out_score) const override {
if (tree->num_leaves() <= 1) { return; }
CHECK(tree->num_leaves() <= data_partition_->num_leaves());
#pragma omp parallel for schedule(static)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
}
}
}
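// Reading aid (not part of the original): the loop above parallelizes over
// leaves rather than rows. data_partition_ keeps a contiguous index list per
// leaf and the leaves partition the data, so each thread adds one leaf's
// constant output to a disjoint set of rows of out_score; no synchronization
// is needed.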
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
protected:
virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);
/*!
* \brief Some initial works before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial works before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void FindBestSplits();
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Partition tree and data according to the best split.
* \param tree Current tree; it will be split by this function.
* \param best_leaf The index of the leaf that will be split.
* \param left_leaf The index of the left leaf after the split.
* \param right_leaf The index of the right leaf after the split.
*/
virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
/* Force splits with forced_split_json dict and then return num splits forced.*/
virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf,
int* right_leaf, int* cur_depth,
bool *aborted_last_force_split);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
double CalculateOndemandCosts(int feature_index, int leaf_index);
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used for generate used features */
Random random_;
/*! \brief used for sub-feature training; is_feature_used_[i] = false means don't use feature i */
std::vector<int8_t> is_feature_used_;
/*! \brief used feature indices in current tree */
std::vector<int> used_feature_indices_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief store best split per feature for all leaves */
std::vector<SplitInfo> splits_per_leaf_;
/*! \brief stores best thresholds for all feature for smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all feature for larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, ordered for cache optimized */
std::vector<score_t> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized */
std::vector<score_t> ordered_hessians_;
#endif
/*! \brief Store ordered bin */
std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
/*! \brief True if has ordered bin */
bool has_ordered_bin_ = false;
/*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
std::vector<char> is_data_in_leaf_;
/*! \brief used to cache historical histogram to speed up*/
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
int num_threads_;
std::vector<int> ordered_bin_indices_;
bool is_constant_hessian_;
std::vector<bool> is_feature_used_in_split_;
std::vector<uint32_t> feature_used_in_data;
};
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
if (leaf_idx >= 0) {
return data_partition_->leaf_count(leaf_idx);
} else {
return 0;
}
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
kernels.c | #include <math.h>
/* This routine computes the 2-norm of a vector */
double norm2(int n, double *x){
int i;
double res;
res = 0.0;
/* CC: reduction is necessary to have a correct result */
#pragma omp parallel for reduction(+:res)
for(i=0; i<n; i++)
res += x[i]*x[i];
res = sqrt(res);
return res;
}
/* This routine computes the dot-product of two vectors */
double dot(int n, double *x, double *y){
int i;
double res;
res = 0.0;
/* CC: reduction is necessary to have a correct result */
#pragma omp parallel for reduction(+:res)
for(i=0; i<n; i++)
res += x[i]*y[i];
return res;
}
/* This routine computes the product of a sparse matrix A of size m
times a vector x and stores the result in a vector y :
y = alpha*A*x + beta*y */
void spmv(int n, int *rowptr, int *colind, double *val, double alpha, double *x, double beta, double *y){
int i, j;
/* CC: simple parallelization by rows. The dynamic scheduling helps
in case the matrix has rows with different numbers of nonzeroes. */
#pragma omp parallel for private(j) //schedule(dynamic,50)
for(i=0; i<n; i++){
/* for each row... */
y[i] = beta*y[i];
for(j=rowptr[i]; j<rowptr[i+1]; j++){
/* for each coefficient in the row... */
y[i] += alpha*val[j]*x[colind[j]];
}
}
return;
}
/* This routine computes the linear combination of two vectors x and y
of size n and stores the result in y:
y = beta*y + alpha*x */
void axpby(int n, double alpha, double *x, double beta, double *y){
int i;
/* CC: as easy as it gets */
#pragma omp parallel for
for(i=0; i<n; i++)
y[i] = beta*y[i]+alpha*x[i];
}
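A minimal driver (an illustrative sketch, not part of the original kernels.c) exercising all four kernels on a tiny hand-built 3x3 CSR matrix; compile it together with the kernels above (e.g. with -fopenmp -lm). The matrix and the expected values in the comments are assumptions chosen for the demonstration:
#include <stdio.h>
/* prototypes from kernels.c above */
double norm2(int n, double *x);
double dot(int n, double *x, double *y);
void spmv(int n, int *rowptr, int *colind, double *val,
double alpha, double *x, double beta, double *y);
void axpby(int n, double alpha, double *x, double beta, double *y);
int main(void){
/* CSR storage of the 3x3 matrix [[2,0,1],[0,3,0],[1,0,2]] */
int rowptr[4] = {0, 2, 3, 5};
int colind[5] = {0, 2, 1, 0, 2};
double val[5] = {2.0, 1.0, 3.0, 1.0, 2.0};
double x[3] = {1.0, 1.0, 1.0};
double y[3] = {0.0, 0.0, 0.0};
spmv(3, rowptr, colind, val, 1.0, x, 0.0, y); /* y = A*x = (3,3,3) */
axpby(3, -1.0, x, 1.0, y);                    /* y = y - x = (2,2,2) */
printf("dot(x,y) = %f\n", dot(3, x, y));      /* expected 6.0 */
printf("norm2(y) = %f\n", norm2(3, y));       /* expected sqrt(12) */
return 0;
}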
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
/* problem size (argv[1..3]) and time-step count (argv[4]) must be supplied
 * on the command line; otherwise these remain uninitialized below */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information; the trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
for (t4=max(max(ceild(t1-1020,1024),ceild(4*t2-Nz-2035,2048)),ceild(24*t3-Ny-2035,2048));t4<=min(min(min(floord(4*Nt+Nx-9,2048),floord(2*t1+Nx-3,2048)),floord(4*t2+Nx-9,2048)),floord(24*t3+Nx+11,2048));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(2048*t4,4*t5+4);
ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
nlcpy_argsort.c | /*
#
# * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
*/
#include "nlcpy.h"
/****************************
*
* @OPERATOR_NAME@
*
* **************************/
uint64_t nlcpy_argsort_bool(ve_array *val, ve_array *idx, int32_t *psw)
{
asl_error_t asl_err;
asl_sort_t sort;
int32_t *pval = (int32_t *)nlcpy__get_ptr(val);
if (pval == NULL) return NLCPY_ERROR_MEMORY;
int64_t *pidx = (int64_t *)nlcpy__get_ptr(idx);
if (pidx == NULL) return NLCPY_ERROR_MEMORY;
/////////
// 0-d //
/////////
if (val->ndim == 0) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
pval[0] = 0;
} /* omp single */
/////////
// 1-d //
/////////
} else if (val->ndim == 1) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
const uint64_t ival0 = val->strides[0] / val->itemsize;
const uint64_t iidx0 = idx->strides[0] / idx->itemsize;
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* create sorter */
asl_err = asl_sort_create_i32(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* preallocate */
asl_err = asl_sort_preallocate(sort, val->size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* execute sort */
asl_err = asl_sort_execute_i32(sort, val->size, pval, ASL_NULL, ASL_NULL, pidx);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
/////////
// N-d //
/////////
} else if (val->ndim > 1 && val->ndim <= NLCPY_MAXNDIM){
#ifdef _OPENMP
const int nt = omp_get_num_threads();
const int it = omp_get_thread_num();
#else
const int nt = 1;
const int it = 0;
#endif /* _OPENMP */
int64_t *cnt_val = (int64_t*)alloca(sizeof(int64_t) * val->ndim);
nlcpy__reset_coords(cnt_val, val->ndim);
int64_t i, j, k;
int64_t n_inner = val->ndim - 1;
int64_t n_outer = 0;
uint64_t ival = 0;
uint64_t iidx = 0;
const uint64_t ival0 = val->strides[n_inner] / val->itemsize;
const uint64_t iidx0 = idx->strides[n_inner] / idx->itemsize;
const uint64_t sort_size = val->shape[n_inner];
const int64_t len = val->shape[n_outer];
const int64_t cnt_s = len * it / nt;
const int64_t cnt_e = len * (it + 1) / nt;
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{
/* create sorter */
asl_err = asl_sort_create_i32(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp critical */
/* preallocate */
asl_err = asl_sort_preallocate(sort, sort_size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
for (int64_t cnt = cnt_s; cnt < cnt_e; cnt++) {
ival = cnt * val->strides[n_outer] / val->itemsize;
iidx = cnt * idx->strides[n_outer] / idx->itemsize;
for (;;) {
/* execute sort */
asl_err = asl_sort_execute_i32(sort, sort_size, &(pval[ival]), ASL_NULL, ASL_NULL, &(pidx[iidx]));
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
// set next index
for (k = n_inner-1; k >= 1; k--) {
if (++cnt_val[k] < val->shape[k]) {
ival += val->strides[k] / val->itemsize;
iidx += idx->strides[k] / idx->itemsize;
break;
}
cnt_val[k] = 0;
ival -= (val->strides[k] / val->itemsize) * (val->shape[k] - 1);
iidx -= (idx->strides[k] / idx->itemsize) * (idx->shape[k] - 1);
}
if (k < 1) break;
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif /* _OPENMP */
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} else {
return (uint64_t)NLCPY_ERROR_NDIM;
}
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* restore thread count */
asl_err = asl_library_set_thread_count(nt);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
}
retrieve_fpe_flags(psw);
return (uint64_t)NLCPY_ERROR_OK;
}
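All of the N-d branches in this file advance flattened offsets with the same stride-walking ("odometer") coordinate update used above: bump the rightmost outer axis, and carry into the next axis when it wraps. A minimal standalone sketch of that pattern on a hypothetical 2x3x4 C-ordered array follows; the shapes, strides, and printout are illustrative only, not the NLCPy API:
#include <stdio.h>
#include <stdint.h>
int main(void){
int64_t shape[3]   = {2, 3, 4};  /* hypothetical 2x3x4 array */
int64_t strides[3] = {12, 4, 1}; /* element strides, C order */
int64_t cnt[3] = {0, 0, 0};      /* coordinate counter for the middle axes */
int64_t n_inner = 2, k;
/* axis 0 is split across threads in the real code; here a plain loop */
for (int64_t c0 = 0; c0 < shape[0]; c0++) {
int64_t off = c0 * strides[0];
for (;;) {
printf("inner row starts at offset %lld\n", (long long)off);
/* odometer update: increment the rightmost outer axis, carry on wrap */
for (k = n_inner - 1; k >= 1; k--) {
if (++cnt[k] < shape[k]) { off += strides[k]; break; }
cnt[k] = 0;
off -= strides[k] * (shape[k] - 1);
}
if (k < 1) break; /* every outer axis wrapped: this slice is done */
}
}
return 0;
}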
uint64_t nlcpy_argsort_i32(ve_array *val, ve_array *idx, int32_t *psw)
{
asl_error_t asl_err;
asl_sort_t sort;
int32_t *pval = (int32_t *)nlcpy__get_ptr(val);
if (pval == NULL) return NLCPY_ERROR_MEMORY;
int64_t *pidx = (int64_t *)nlcpy__get_ptr(idx);
if (pidx == NULL) return NLCPY_ERROR_MEMORY;
/////////
// 0-d //
/////////
if (val->ndim == 0) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
pval[0] = 0;
} /* omp single */
/////////
// 1-d //
/////////
} else if (val->ndim == 1) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
const uint64_t ival0 = val->strides[0] / val->itemsize;
const uint64_t iidx0 = idx->strides[0] / idx->itemsize;
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* create sorter */
asl_err = asl_sort_create_i32(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* preallocate */
asl_err = asl_sort_preallocate(sort, val->size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* execute sort */
asl_err = asl_sort_execute_i32(sort, val->size, pval, ASL_NULL, ASL_NULL, pidx);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
/////////
// N-d //
/////////
} else if (val->ndim > 1 && val->ndim <= NLCPY_MAXNDIM){
#ifdef _OPENMP
const int nt = omp_get_num_threads();
const int it = omp_get_thread_num();
#else
const int nt = 1;
const int it = 0;
#endif /* _OPENMP */
int64_t *cnt_val = (int64_t*)alloca(sizeof(int64_t) * val->ndim);
nlcpy__reset_coords(cnt_val, val->ndim);
int64_t i, j, k;
int64_t n_inner = val->ndim - 1;
int64_t n_outer = 0;
uint64_t ival = 0;
uint64_t iidx = 0;
const uint64_t ival0 = val->strides[n_inner] / val->itemsize;
const uint64_t iidx0 = idx->strides[n_inner] / idx->itemsize;
const uint64_t sort_size = val->shape[n_inner];
const int64_t len = val->shape[n_outer];
const int64_t cnt_s = len * it / nt;
const int64_t cnt_e = len * (it + 1) / nt;
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{
/* create sorter */
asl_err = asl_sort_create_i32(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp critical */
/* preallocate */
asl_err = asl_sort_preallocate(sort, sort_size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
for (int64_t cnt = cnt_s; cnt < cnt_e; cnt++) {
ival = cnt * val->strides[n_outer] / val->itemsize;
iidx = cnt * idx->strides[n_outer] / idx->itemsize;
for (;;) {
/* execute sort */
asl_err = asl_sort_execute_i32(sort, sort_size, &(pval[ival]), ASL_NULL, ASL_NULL, &(pidx[iidx]));
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
// set next index
for (k = n_inner-1; k >= 1; k--) {
if (++cnt_val[k] < val->shape[k]) {
ival += val->strides[k] / val->itemsize;
iidx += idx->strides[k] / idx->itemsize;
break;
}
cnt_val[k] = 0;
ival -= (val->strides[k] / val->itemsize) * (val->shape[k] - 1);
iidx -= (idx->strides[k] / idx->itemsize) * (idx->shape[k] - 1);
}
if (k < 1) break;
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif /* _OPENMP */
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} else {
return (uint64_t)NLCPY_ERROR_NDIM;
}
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* restore thread count */
asl_err = asl_library_set_thread_count(nt);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
}
retrieve_fpe_flags(psw);
return (uint64_t)NLCPY_ERROR_OK;
}
uint64_t nlcpy_argsort_i64(ve_array *val, ve_array *idx, int32_t *psw)
{
asl_error_t asl_err;
asl_sort_t sort;
int64_t *pval = (int64_t *)nlcpy__get_ptr(val);
if (pval == NULL) return NLCPY_ERROR_MEMORY;
int64_t *pidx = (int64_t *)nlcpy__get_ptr(idx);
if (pidx == NULL) return NLCPY_ERROR_MEMORY;
/////////
// 0-d //
/////////
if (val->ndim == 0) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
pval[0] = 0;
} /* omp single */
/////////
// 1-d //
/////////
} else if (val->ndim == 1) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
const uint64_t ival0 = val->strides[0] / val->itemsize;
const uint64_t iidx0 = idx->strides[0] / idx->itemsize;
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* create sorter */
asl_err = asl_sort_create_i64(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* preallocate */
asl_err = asl_sort_preallocate(sort, val->size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* execute sort */
asl_err = asl_sort_execute_i64(sort, val->size, pval, ASL_NULL, ASL_NULL, pidx);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
/////////
// N-d //
/////////
} else if (val->ndim > 1 && val->ndim <= NLCPY_MAXNDIM){
#ifdef _OPENMP
const int nt = omp_get_num_threads();
const int it = omp_get_thread_num();
#else
const int nt = 1;
const int it = 0;
#endif /* _OPENMP */
int64_t *cnt_val = (int64_t*)alloca(sizeof(int64_t) * val->ndim);
nlcpy__reset_coords(cnt_val, val->ndim);
int64_t i, j, k;
int64_t n_inner = val->ndim - 1;
int64_t n_outer = 0;
uint64_t ival = 0;
uint64_t iidx = 0;
const uint64_t ival0 = val->strides[n_inner] / val->itemsize;
const uint64_t iidx0 = idx->strides[n_inner] / idx->itemsize;
const uint64_t sort_size = val->shape[n_inner];
const int64_t len = val->shape[n_outer];
const int64_t cnt_s = len * it / nt;
const int64_t cnt_e = len * (it + 1) / nt;
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{
/* create sorter */
asl_err = asl_sort_create_i64(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp critical */
/* preallocate */
asl_err = asl_sort_preallocate(sort, sort_size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
for (int64_t cnt = cnt_s; cnt < cnt_e; cnt++) {
ival = cnt * val->strides[n_outer] / val->itemsize;
iidx = cnt * idx->strides[n_outer] / idx->itemsize;
for (;;) {
/* execute sort */
asl_err = asl_sort_execute_i64(sort, sort_size, &(pval[ival]), ASL_NULL, ASL_NULL, &(pidx[iidx]));
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
// set next index
for (k = n_inner-1; k >= 1; k--) {
if (++cnt_val[k] < val->shape[k]) {
ival += val->strides[k] / val->itemsize;
iidx += idx->strides[k] / idx->itemsize;
break;
}
cnt_val[k] = 0;
ival -= (val->strides[k] / val->itemsize) * (val->shape[k] - 1);
iidx -= (idx->strides[k] / idx->itemsize) * (idx->shape[k] - 1);
}
if (k < 1) break;
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif /* _OPENMP */
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} else {
return (uint64_t)NLCPY_ERROR_NDIM;
}
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* restore thread count */
asl_err = asl_library_set_thread_count(nt);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
}
retrieve_fpe_flags(psw);
return (uint64_t)NLCPY_ERROR_OK;
}
uint64_t nlcpy_argsort_u32(ve_array *val, ve_array *idx, int32_t *psw)
{
asl_error_t asl_err;
asl_sort_t sort;
uint32_t *pval = (uint32_t *)nlcpy__get_ptr(val);
if (pval == NULL) return NLCPY_ERROR_MEMORY;
int64_t *pidx = (int64_t *)nlcpy__get_ptr(idx);
if (pidx == NULL) return NLCPY_ERROR_MEMORY;
/////////
// 0-d //
/////////
if (val->ndim == 0) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
pval[0] = 0;
} /* omp single */
/////////
// 1-d //
/////////
} else if (val->ndim == 1) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
const uint64_t ival0 = val->strides[0] / val->itemsize;
const uint64_t iidx0 = idx->strides[0] / idx->itemsize;
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* create sorter */
asl_err = asl_sort_create_u32(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* preallocate */
asl_err = asl_sort_preallocate(sort, val->size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* execute sort */
asl_err = asl_sort_execute_u32(sort, val->size, pval, ASL_NULL, ASL_NULL, pidx);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
/////////
// N-d //
/////////
} else if (val->ndim > 1 && val->ndim <= NLCPY_MAXNDIM){
#ifdef _OPENMP
const int nt = omp_get_num_threads();
const int it = omp_get_thread_num();
#else
const int nt = 1;
const int it = 0;
#endif /* _OPENMP */
int64_t *cnt_val = (int64_t*)alloca(sizeof(int64_t) * val->ndim);
nlcpy__reset_coords(cnt_val, val->ndim);
int64_t i, j, k;
int64_t n_inner = val->ndim - 1;
int64_t n_outer = 0;
uint64_t ival = 0;
uint64_t iidx = 0;
const uint64_t ival0 = val->strides[n_inner] / val->itemsize;
const uint64_t iidx0 = idx->strides[n_inner] / idx->itemsize;
const uint64_t sort_size = val->shape[n_inner];
const int64_t len = val->shape[n_outer];
const int64_t cnt_s = len * it / nt;
const int64_t cnt_e = len * (it + 1) / nt;
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{
/* create sorter */
asl_err = asl_sort_create_u32(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp critical */
/* preallocate */
asl_err = asl_sort_preallocate(sort, sort_size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
for (int64_t cnt = cnt_s; cnt < cnt_e; cnt++) {
ival = cnt * val->strides[n_outer] / val->itemsize;
iidx = cnt * idx->strides[n_outer] / idx->itemsize;
for (;;) {
/* execute sort */
asl_err = asl_sort_execute_u32(sort, sort_size, &(pval[ival]), ASL_NULL, ASL_NULL, &(pidx[iidx]));
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
// set next index
for (k = n_inner-1; k >= 1; k--) {
if (++cnt_val[k] < val->shape[k]) {
ival += val->strides[k] / val->itemsize;
iidx += idx->strides[k] / idx->itemsize;
break;
}
cnt_val[k] = 0;
ival -= (val->strides[k] / val->itemsize) * (val->shape[k] - 1);
iidx -= (idx->strides[k] / idx->itemsize) * (idx->shape[k] - 1);
}
if (k < 1) break;
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif /* _OPENMP */
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} else {
return (uint64_t)NLCPY_ERROR_NDIM;
}
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* restore thread count */
asl_err = asl_library_set_thread_count(nt);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
}
retrieve_fpe_flags(psw);
return (uint64_t)NLCPY_ERROR_OK;
}
uint64_t nlcpy_argsort_u64(ve_array *val, ve_array *idx, int32_t *psw)
{
asl_error_t asl_err;
asl_sort_t sort;
uint64_t *pval = (uint64_t *)nlcpy__get_ptr(val);
if (pval == NULL) return NLCPY_ERROR_MEMORY;
int64_t *pidx = (int64_t *)nlcpy__get_ptr(idx);
if (pidx == NULL) return NLCPY_ERROR_MEMORY;
/////////
// 0-d //
/////////
if (val->ndim == 0) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
pval[0] = 0;
} /* omp single */
/////////
// 1-d //
/////////
} else if (val->ndim == 1) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
const uint64_t ival0 = val->strides[0] / val->itemsize;
const uint64_t iidx0 = idx->strides[0] / idx->itemsize;
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* create sorter */
asl_err = asl_sort_create_u64(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* preallocate */
asl_err = asl_sort_preallocate(sort, val->size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* execute sort */
asl_err = asl_sort_execute_u64(sort, val->size, pval, ASL_NULL, ASL_NULL, pidx);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
/////////
// N-d //
/////////
} else if (val->ndim > 1 && val->ndim <= NLCPY_MAXNDIM){
#ifdef _OPENMP
const int nt = omp_get_num_threads();
const int it = omp_get_thread_num();
#else
const int nt = 1;
const int it = 0;
#endif /* _OPENMP */
int64_t *cnt_val = (int64_t*)alloca(sizeof(int64_t) * val->ndim);
nlcpy__reset_coords(cnt_val, val->ndim);
int64_t i, j, k;
int64_t n_inner = val->ndim - 1;
int64_t n_outer = 0;
uint64_t ival = 0;
uint64_t iidx = 0;
const uint64_t ival0 = val->strides[n_inner] / val->itemsize;
const uint64_t iidx0 = idx->strides[n_inner] / idx->itemsize;
const uint64_t sort_size = val->shape[n_inner];
const int64_t len = val->shape[n_outer];
const int64_t cnt_s = len * it / nt;
const int64_t cnt_e = len * (it + 1) / nt;
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{
/* create sorter */
asl_err = asl_sort_create_u64(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp critical */
/* preallocate */
asl_err = asl_sort_preallocate(sort, sort_size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
for (int64_t cnt = cnt_s; cnt < cnt_e; cnt++) {
ival = cnt * val->strides[n_outer] / val->itemsize;
iidx = cnt * idx->strides[n_outer] / idx->itemsize;
for (;;) {
/* execute sort */
asl_err = asl_sort_execute_u64(sort, sort_size, &(pval[ival]), ASL_NULL, ASL_NULL, &(pidx[iidx]));
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
// set next index
for (k = n_inner-1; k >= 1; k--) {
if (++cnt_val[k] < val->shape[k]) {
ival += val->strides[k] / val->itemsize;
iidx += idx->strides[k] / idx->itemsize;
break;
}
cnt_val[k] = 0;
ival -= (val->strides[k] / val->itemsize) * (val->shape[k] - 1);
iidx -= (idx->strides[k] / idx->itemsize) * (idx->shape[k] - 1);
}
if (k < 1) break;
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif /* _OPENMP */
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} else {
return (uint64_t)NLCPY_ERROR_NDIM;
}
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* restore thread count */
asl_err = asl_library_set_thread_count(nt);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
}
retrieve_fpe_flags(psw);
return (uint64_t)NLCPY_ERROR_OK;
}
uint64_t nlcpy_argsort_f32(ve_array *val, ve_array *idx, int32_t *psw)
{
asl_error_t asl_err;
asl_sort_t sort;
float *pval = (float *)nlcpy__get_ptr(val);
if (pval == NULL) return NLCPY_ERROR_MEMORY;
int64_t *pidx = (int64_t *)nlcpy__get_ptr(idx);
if (pidx == NULL) return NLCPY_ERROR_MEMORY;
/////////
// 0-d //
/////////
if (val->ndim == 0) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
pval[0] = 0;
} /* omp single */
/////////
// 1-d //
/////////
} else if (val->ndim == 1) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
const uint64_t ival0 = val->strides[0] / val->itemsize;
const uint64_t iidx0 = idx->strides[0] / idx->itemsize;
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* create sorter */
asl_err = asl_sort_create_s(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* preallocate */
asl_err = asl_sort_preallocate(sort, val->size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* execute sort */
asl_err = asl_sort_execute_s(sort, val->size, pval, ASL_NULL, ASL_NULL, pidx);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
/////////
// N-d //
/////////
} else if (val->ndim > 1 && val->ndim <= NLCPY_MAXNDIM){
#ifdef _OPENMP
const int nt = omp_get_num_threads();
const int it = omp_get_thread_num();
#else
const int nt = 1;
const int it = 0;
#endif /* _OPENMP */
int64_t *cnt_val = (int64_t*)alloca(sizeof(int64_t) * val->ndim);
nlcpy__reset_coords(cnt_val, val->ndim);
int64_t i, j, k;
int64_t n_inner = val->ndim - 1;
int64_t n_outer = 0;
uint64_t ival = 0;
uint64_t iidx = 0;
const uint64_t ival0 = val->strides[n_inner] / val->itemsize;
const uint64_t iidx0 = idx->strides[n_inner] / idx->itemsize;
const uint64_t sort_size = val->shape[n_inner];
const int64_t len = val->shape[n_outer];
const int64_t cnt_s = len * it / nt;
const int64_t cnt_e = len * (it + 1) / nt;
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{
/* create sorter */
asl_err = asl_sort_create_s(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp critical */
/* preallocate */
asl_err = asl_sort_preallocate(sort, sort_size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
for (int64_t cnt = cnt_s; cnt < cnt_e; cnt++) {
ival = cnt * val->strides[n_outer] / val->itemsize;
iidx = cnt * idx->strides[n_outer] / idx->itemsize;
for (;;) {
/* execute sort */
asl_err = asl_sort_execute_s(sort, sort_size, &(pval[ival]), ASL_NULL, ASL_NULL, &(pidx[iidx]));
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
// set next index
for (k = n_inner-1; k >= 1; k--) {
if (++cnt_val[k] < val->shape[k]) {
ival += val->strides[k] / val->itemsize;
iidx += idx->strides[k] / idx->itemsize;
break;
}
cnt_val[k] = 0;
ival -= (val->strides[k] / val->itemsize) * (val->shape[k] - 1);
iidx -= (idx->strides[k] / idx->itemsize) * (idx->shape[k] - 1);
}
if (k < 1) break;
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif /* _OPENMP */
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} else {
return (uint64_t)NLCPY_ERROR_NDIM;
}
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* restore thread count */
asl_err = asl_library_set_thread_count(nt);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
}
retrieve_fpe_flags(psw);
return (uint64_t)NLCPY_ERROR_OK;
}
uint64_t nlcpy_argsort_f64(ve_array *val, ve_array *idx, int32_t *psw)
{
asl_error_t asl_err;
asl_sort_t sort;
double *pval = (double *)nlcpy__get_ptr(val);
if (pval == NULL) return NLCPY_ERROR_MEMORY;
int64_t *pidx = (int64_t *)nlcpy__get_ptr(idx);
if (pidx == NULL) return NLCPY_ERROR_MEMORY;
/////////
// 0-d //
/////////
if (val->ndim == 0) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
pval[0] = 0;
} /* omp single */
/////////
// 1-d //
/////////
} else if (val->ndim == 1) {
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
const uint64_t ival0 = val->strides[0] / val->itemsize;
const uint64_t iidx0 = idx->strides[0] / idx->itemsize;
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* create sorter */
asl_err = asl_sort_create_d(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* preallocate */
asl_err = asl_sort_preallocate(sort, val->size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* execute sort */
asl_err = asl_sort_execute_d(sort, val->size, pval, ASL_NULL, ASL_NULL, pidx);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
/////////
// N-d //
/////////
} else if (val->ndim > 1 && val->ndim <= NLCPY_MAXNDIM){
#ifdef _OPENMP
const int nt = omp_get_num_threads();
const int it = omp_get_thread_num();
#else
const int nt = 1;
const int it = 0;
#endif /* _OPENMP */
int64_t *cnt_val = (int64_t*)alloca(sizeof(int64_t) * val->ndim);
nlcpy__reset_coords(cnt_val, val->ndim);
int64_t i, j, k;
int64_t n_inner = val->ndim - 1;
int64_t n_outer = 0;
uint64_t ival = 0;
uint64_t iidx = 0;
const uint64_t ival0 = val->strides[n_inner] / val->itemsize;
const uint64_t iidx0 = idx->strides[n_inner] / idx->itemsize;
const uint64_t sort_size = val->shape[n_inner];
const int64_t len = val->shape[n_outer];
const int64_t cnt_s = len * it / nt;
const int64_t cnt_e = len * (it + 1) / nt;
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* set thread count */
asl_err = asl_library_set_thread_count(1);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp single */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
{
/* create sorter */
asl_err = asl_sort_create_d(&sort, ASL_SORTORDER_ASCENDING, ASL_SORTALGORITHM_AUTO);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} /* omp critical */
/* preallocate */
asl_err = asl_sort_preallocate(sort, sort_size);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
/* set strides */
asl_err = asl_sort_set_input_key_long_stride(sort, ival0);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
for (int64_t cnt = cnt_s; cnt < cnt_e; cnt++) {
ival = cnt * val->strides[n_outer] / val->itemsize;
iidx = cnt * idx->strides[n_outer] / idx->itemsize;
for (;;) {
/* execute sort */
asl_err = asl_sort_execute_d(sort, sort_size, &(pval[ival]), ASL_NULL, ASL_NULL, &(pidx[iidx]));
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
// set next index
for (k = n_inner-1; k >= 1; k--) {
if (++cnt_val[k] < val->shape[k]) {
ival += val->strides[k] / val->itemsize;
iidx += idx->strides[k] / idx->itemsize;
break;
}
cnt_val[k] = 0;
ival -= (val->strides[k] / val->itemsize) * (val->shape[k] - 1);
iidx -= (idx->strides[k] / idx->itemsize) * (idx->shape[k] - 1);
}
if (k < 1) break;
}
}
#ifdef _OPENMP
#pragma omp barrier
#endif /* _OPENMP */
/* destroy sorter */
asl_err = asl_sort_destroy(sort);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
} else {
return (uint64_t)NLCPY_ERROR_NDIM;
}
#ifdef _OPENMP
const int nt = omp_get_max_threads();
#else
const int nt = 1;
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp single
#endif /* _OPENMP */
{
/* restore thread count */
asl_err = asl_library_set_thread_count(nt);
if (asl_err != ASL_ERROR_OK) return NLCPY_ERROR_ASL;
}
retrieve_fpe_flags(psw);
return (uint64_t)NLCPY_ERROR_OK;
}
uint64_t nlcpy_argsort(ve_arguments *args, int32_t *psw)
{
ve_array *val = &(args->unary.x);
ve_array *idx = &(args->unary.z);
uint64_t err = NLCPY_ERROR_OK;
switch (val->dtype) {
case ve_bool: err = nlcpy_argsort_bool (val, idx, psw); break;
case ve_i32: err = nlcpy_argsort_i32 (val, idx, psw); break;
case ve_i64: err = nlcpy_argsort_i64 (val, idx, psw); break;
case ve_u32: err = nlcpy_argsort_u32 (val, idx, psw); break;
case ve_u64: err = nlcpy_argsort_u64 (val, idx, psw); break;
case ve_f32: err = nlcpy_argsort_f32 (val, idx, psw); break;
case ve_f64: err = nlcpy_argsort_f64 (val, idx, psw); break;
default: err = NLCPY_ERROR_DTYPE;
}
return (uint64_t)err;
}
|
commondraw.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : commondraw.c
* Description : common drawing
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_commondraw_c__
#define __libaroma_commondraw_c__
#include <aroma_internal.h>
/*
* Function : libaroma_draw_limit
* Return Value: int
* Descriptions: clamp a position into [0, max)
*/
int libaroma_draw_limit(
int x, int max) {
if (x<0) {
return 0;
}
if (x>=max) {
return max-1;
}
return x;
} /* End of libaroma_draw_limit */
/*
* Function : libaroma_draw_limited
* Return Value: byte
* Descriptions: check whether a draw position is out of bounds (overflow)
*/
byte libaroma_draw_limited(
int x, int max) {
return ((x < 0) || (x >= max) ? 1 : 0);
} /* End of libaroma_draw_limited */
/*
* Function : libaroma_draw_ex2
* Return Value: byte
* Descriptions: canvas drawing
*/
byte libaroma_draw_ex2(
LIBAROMA_CANVASP dst,
LIBAROMA_CANVASP src,
int dx, int dy,
int sx, int sy,
int sw, int sh,
byte draw_flags,
byte opacity,
byte ismask,
word maskcolor
) {
if (src == NULL) {
ALOGW("libaroma_draw_ex1 src = NULL");
return 0;
}
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
if ((dx >= dst->w) || (dy >= dst->h)) {
ALOGW("libaroma_draw_ex1 dx/dy bigger that destination size");
return 0;
}
if (opacity==0) {
return 1; /* fully transparent, nothing to draw */
}
byte useAlpha = (draw_flags&LIBAROMA_DRAW_WITH_ALPHA)?1:0;
byte noDither = (draw_flags&LIBAROMA_DRAW_NODITHER)?1:0;
byte toBlack = (draw_flags&LIBAROMA_DRAW_TO_BLACK)?1:0;
/* fix positions */
if (sx < 0) {
dx += abs(sx);
sw -= abs(sx);
sx = 0;
}
if (sy < 0) {
dy += abs(sy);
sh -= abs(sy);
sy = 0;
}
/* fix size */
if (sw + sx >= src->w) {
sw -= (sw + sx) - src->w;
}
if (sh + sy >= src->h) {
sh -= (sh + sy) - src->h;
}
if ((sw <= 0) || (sh <= 0)) {
ALOGW("libaroma_draw_ex1 calculated sw/sh < 1");
return 0;
}
/* set calculated units */
int sr_w = sw;
int sr_h = sh;
int sr_x = sx;
int sr_y = sy;
int ds_x = dx;
int ds_y = dy;
/* fix destination */
if (dx < 0) {
int ndx = abs(dx);
sr_x += ndx;
sr_w -= ndx;
ds_x = 0;
}
if (dy < 0) {
int ndy = abs(dy);
sr_y += ndy;
sr_h -= ndy;
ds_y = 0;
}
/* fix source size */
if (sr_w + dx > dst->w) {
sr_w -= (sr_w + dx) - dst->w;
}
if (sr_h + dy > dst->h) {
sr_h -= (sr_h + dy) - dst->h;
}
/* prepare loop data */
int y;
int pos_sr_x = sr_x * 2;
int pos_ds_x = ds_x * 2;
int pos_sc_w = src->l * 2;
int pos_dc_w = dst->l * 2;
int copy_sz = sr_w * 2;
byte * src_data = ((byte *) src->data);
byte * dst_data = ((byte *) dst->data);
if (useAlpha) {
if (src->alpha == NULL) {
useAlpha = 0;
}
}
if (!useAlpha){
ismask=0;
}
if (opacity == 0xff) {
if (useAlpha) {
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp dst_mem = (wordp) (dst_data+((ds_y + y)*pos_dc_w)+pos_ds_x);
if (ismask){
libaroma_alpha_mono(
sr_w, dst_mem, dst_mem, maskcolor,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
else{
wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
if (noDither){
libaroma_alpha_px(
sr_w, dst_mem, dst_mem,
src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
else{
libaroma_alpha_px_line(
y, sr_w, dst_mem, dst_mem,
src_mem, (bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
}
}
}
}
else {
/* Copy Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
memcpy(
dst_data + ((ds_y + y)*pos_dc_w) + pos_ds_x,
src_data + ((sr_y + y)*pos_sc_w) + pos_sr_x,
copy_sz
);
}
}
}
else {
if (useAlpha) {
/* Blend Destination with Source */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp tmp_dst = (wordp) malloc(sr_w * 2);
wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
if (ismask){
libaroma_alpha_mono(
sr_w, tmp_dst, dst_mem, maskcolor,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
else{
wordp src_mem = (wordp) (src_data+((sr_y + y)*pos_sc_w)+pos_sr_x);
if (toBlack){
libaroma_alpha_px(
sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_black(sr_w, dst_mem, tmp_dst, opacity);
}
else if (noDither){
libaroma_alpha_px(
sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
else{
libaroma_alpha_px_line(
y, sr_w, tmp_dst, dst_mem, src_mem,
(bytep) (src->alpha + ((sr_y + y) * src->l) + sr_x)
);
libaroma_alpha_const_line(
y, sr_w, dst_mem, dst_mem, tmp_dst, opacity
);
}
}
free(tmp_dst);
}
}
else {
/* Blend Data Directly */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (y = 0; y < sr_h; y++) {
wordp dst_mem = (wordp) (dst_data + ((ds_y + y) * pos_dc_w) + pos_ds_x);
wordp src_mem = (wordp) (src_data + ((sr_y + y) * pos_sc_w) + pos_sr_x);
if (toBlack){
libaroma_alpha_black(sr_w, dst_mem, src_mem, opacity);
}
else if (noDither){
libaroma_alpha_const(
sr_w, dst_mem, dst_mem, src_mem, opacity
);
}
else{
libaroma_alpha_const_line(
y, sr_w, dst_mem, dst_mem, src_mem, opacity
);
}
}
}
}
return 1;
} /* End of libaroma_draw_ex2 */
/*
* Function : libaroma_draw_rect
* Return Value: byte
* Descriptions: draw rectangle
*/
byte libaroma_draw_rect(
LIBAROMA_CANVASP dst,
int x, int y, int w, int h,
word color, byte alpha) {
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
/* fix position */
int x2 = x + w;
int y2 = y + h;
if (x2 > dst->w) {
x2 = dst->w;
}
if (y2 > dst->h) {
y2 = dst->h;
}
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
/* fixed size */
w = x2 - x;
h = y2 - y;
/* draw */
int dy;
if (alpha == 0xff) {
wordp datapos = dst->data + x;
#ifdef libaroma_memset16
for (dy = y; dy < y2; dy++) {
wordp linepos = datapos + (dy * dst->l);
libaroma_color_set(linepos,color,w);
}
#else
int w2=w*2;
wordp firstline = datapos + (y * dst->l);
libaroma_color_set(firstline, color, w);
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (dy = y+1; dy < y2; dy++) {
wordp linepos = datapos + (dy * dst->l);
memcpy(linepos,firstline,w2);
}
#endif
}
else {
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (dy = y; dy < y2; dy++) {
wordp linepos = dst->data + (dy * dst->l) + x;
#ifdef __engine_have_libaroma_alpha_rgba_fill
libaroma_alpha_rgba_fill_line(dy, w, linepos, linepos, color, alpha);
#else
libaroma_alpha_rgba_fill(w, linepos, linepos, color, alpha);
#endif
}
}
return 1;
} /* End of libaroma_draw_rect */
/*
* Function : libaroma_draw_pixel
* Return Value: byte
* Descriptions: draw pixel
*/
byte libaroma_draw_pixel(
LIBAROMA_CANVASP dest,
int dx, int dy,
word color,
byte alpha
){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((dx<0)||(dy<0)||(dy>=dest->h)||(dx>=dest->w)){
return 0;
}
wordp d=&dest->data[dest->l * dy + dx];
if (alpha==0xff){
*d = color;
}
else if (alpha>0){
*d = libaroma_alpha(*d,color,alpha);
}
return 1;
} /* End of libaroma_draw_pixel */
/*
* Function : libaroma_draw_alphapixel
* Return Value: byte
* Descriptions: set alpha pixel
*/
byte libaroma_draw_alphapixel(
LIBAROMA_CANVASP dest,
int dx, int dy,
byte alpha
){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((dx<0)||(dy<0)||(dy>=dest->h)||(dx>=dest->w)){
return 0;
}
if (dest->alpha==NULL){
return 0;
}
dest->alpha[dest->l * dy + dx] = alpha;
return 1;
} /* End of libaroma_draw_alphapixel */
/*
* Function : libaroma_draw_line
* Return Value: byte
* Descriptions: draw line
*/
byte libaroma_draw_line(
LIBAROMA_CANVASP dest,
int x0, int y0, int x1, int y1,
float wd,
word color,
byte alpha,
byte is_mask){
#define __DRAW_PIX(x,y,a) \
if (is_mask==1){ \
if (!libaroma_draw_alphapixel( \
dest, x, y, \
MIN(alpha,MAX(0, alpha * (1-(a)))) \
)) { break; } \
} \
else if (is_mask==2){ \
if (!libaroma_draw_alphapixel( \
dest, x, y, \
MIN(0xff,MAX(0, 255 * (a))) \
)) { break; } \
} \
else{ \
if (!libaroma_draw_pixel( \
dest, x, y, color, \
MIN(0xff,MAX(0, alpha * (1-(a)))) \
)) { break; } \
}
if (!dest){
dest=libaroma_fb()->canvas;
}
int dx = abs(x1-x0), sx = x0 < x1 ? 1 : -1;
int dy = abs(y1-y0), sy = y0 < y1 ? 1 : -1;
int err = dx-dy, e2, x2, y2;
float ed = dx+dy == 0 ? 1 : sqrt((float)dx*dx+(float)dy*dy);
for (wd = (wd+1)/2; ; ) {
if ((x0>=0)&&(y0>=0)){
__DRAW_PIX(x0,y0,
abs(err-dx+dy)/ed-wd+1
);
}
e2 = err; x2 = x0;
if (2*e2 >= -dx) {
for (e2 += dy, y2 = y0; e2 < ed*wd && (y1 != y2 || dx > dy); e2 += dx){
if ((x0>=0)&&(y2>=0)){
__DRAW_PIX(x0, y2+=sy,
abs(e2)/ed-wd+1
);
}
}
if (x0==x1){
break;
}
e2 = err; err -= dy; x0 += sx;
}
if (2*e2 <= dy){
for (e2 = dx-e2; e2 < ed*wd && (x1 != x2 || dx < dy); e2 += dy){
if ((x2>=0)&&(y0>=0)){
__DRAW_PIX(x2 += sx, y0,
abs(e2)/ed-wd+1
);
}
}
if (y0==y1){
break;
}
err += dx; y0 += sy;
}
}
#undef __DRAW_PIX
return 1;
} /* End of libaroma_draw_line */
/*
* Function : libaroma_draw_subpixel
* Return Value: byte
* Descriptions: draw subpixel
*/
byte libaroma_draw_subpixel(
LIBAROMA_CANVASP dest,
float dx, float dy, float thickness,
word color,
byte alpha){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((dx<=-1)||(dy<=-1)||(dy>=dest->h)||(dx>=dest->w)){
return 0;
}
int x, y;
float px, py;
float ht=(thickness-1.0)/2;
for (y=floor(dy-ht);y<=ceil(dy+ht);y++){
if ((y>=0)&&(y<dest->h)){
int pos = y * dest->l;
for (x=floor(dx-ht);x<=ceil(dx+ht);x++){
if ((x>=0)&&(x<dest->w)){
px = abs((dx<x)?dx-x:x-dx)/ht;
py = abs((dy<y)?dy-y:y-dy)/ht;
int alp = MIN(0xff,MAX((1-(px+py)) * 0xff,0));
wordp d = dest->data + pos + x;
word cl = libaroma_alpha(*d, color, alp);
if (alpha!=0xff){
cl=libaroma_alpha(*d,cl,alpha);
}
*d=cl;
}
}
}
}
return 1;
} /* End of libaroma_draw_subpixel */
/*
* Function : libaroma_draw_mask_circle
* Return Value: byte
* Descriptions: draw masked circle
*/
byte libaroma_draw_mask_circle(
LIBAROMA_CANVASP dst,
LIBAROMA_CANVASP src,
int dx, int dy,
int sx, int sy,
int sz,
byte alpha){
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
if (src == NULL) {
return 0;
}
if (sz<2){
return 1;
}
int radius = sz/2;
int rad2 = radius * radius;
int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for(y=-radius; y<=radius; y++){
int pdy = dy + y;
int psy = sy + y;
if ((pdy<dst->h)&&(pdy>=0)&&(psy<src->h)&&(psy>=0)){
int pos_d = pdy * dst->l;
int pos_s = psy * src->l;
int x = sqrt(rad2-y*y);
int w = x*2;
if (sx-x<0){
w-=abs(sx-x);
x=sx;
}
if (dx-x<0){
w-=abs(dx-x);
x=dx;
}
int pdx = dx-x;
int sdx = sx-x;
if (sdx+w>src->w){
w=src->w-sdx;
}
if (pdx+w>dst->w){
w=dst->w-pdx;
}
if (w>0){
wordp dd = dst->data + pos_d + pdx;
wordp sd = src->data + pos_s + sdx;
if (alpha==0xff){
memcpy(dd,sd,w*2);
}
else{
//libaroma_alpha_const_line(pdy,w,dd,dd,sd,alpha);
libaroma_alpha_const(w,dd,dd,sd,alpha);
}
}
}
}
return 1;
} /* End of libaroma_draw_mask_circle */
/*
* Function : libaroma_draw_circle
* Return Value: byte
* Descriptions: draw filled circle
*/
byte libaroma_draw_circle(
LIBAROMA_CANVASP dst,
word color,
int dx, int dy,
int sz,
byte alpha){
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
if (sz<2){
return 1;
}
int radius = sz/2;
int rad2 = radius * radius;
int y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for(y=-radius; y<=radius; y++){
int pdy = dy + y;
if ((pdy<dst->h)&&(pdy>=0)){
int pos_d = pdy * dst->l;
int x = sqrt(rad2-y*y);
int w = x*2;
if (dx-x<0){
w-=abs(dx-x);
x=dx;
}
int pdx = dx-x;
if (pdx+w>dst->w){
w=dst->w-pdx;
}
if (w>0){
wordp dd = dst->data + pos_d + pdx;
if (alpha==0xff){
libaroma_color_set(dd,color,w);
}
else{
#ifdef __engine_have_libaroma_alpha_rgba_fill
libaroma_alpha_rgba_fill_line(pdy,w,dd, dd,color,alpha);
#else
libaroma_alpha_rgba_fill(w,dd, dd,color,alpha);
#endif
}
}
}
}
return 1;
} /* End of libaroma_draw_circle */
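A short usage sketch of the primitives defined above (illustrative only; it assumes libaroma's framebuffer has been initialized elsewhere, and uses a raw RGB565 literal rather than a color helper, which is an assumption about the pixel format). Passing NULL as the canvas selects the framebuffer canvas, as the guards at the top of each function show:
/* draw a filled rectangle, a 50%-opaque circle and a 2px line
 * on the default framebuffer canvas */
void example_commondraw(void) {
word red = 0xF800; /* RGB565 red literal (assumed pixel format) */
libaroma_draw_rect(NULL, 10, 10, 100, 40, red, 0xff);
libaroma_draw_circle(NULL, red, 60, 120, 50, 0x80);
libaroma_draw_line(NULL, 0, 0, 160, 160, 2.0, red, 0xff, 0);
}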
/*
* Function : libaroma_draw_line_width
* Return Value: byte
* Descriptions: draw line with width
*/
byte libaroma_draw_line_width(
LIBAROMA_CANVASP dest,
float x1, float y1, float x2, float y2,
float wd,
word color,
byte alpha,
byte is_mask,
float aliasing){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((is_mask)&&(dest->alpha==NULL)){
return 0;
}
if ((!is_mask)&&(alpha<1)){
return 1;
}
float angle = atan2(y2 - y1, x2 - x1);
float t2sina1 = wd / 2 * sin(angle);
float t2cosa1 = wd / 2 * cos(angle);
float t2sina2 = wd / 2 * sin(angle);
float t2cosa2 = wd / 2 * cos(angle);
LIBAROMA_PATHP path=libaroma_path(x1 + t2sina1, y1 - t2cosa1);
libaroma_path_add(path, x2 + t2sina2, y2 - t2cosa2);
libaroma_path_add(path, x2 - t2sina2, y2 + t2cosa2);
libaroma_path_add(path, x2 - t2sina2, y2 + t2cosa2);
libaroma_path_add(path, x1 - t2sina1, y1 + t2cosa1);
libaroma_path_add(path, x1 + t2sina1, y1 - t2cosa1);
byte res=libaroma_path_draw(
dest,
path,
color,
alpha,
is_mask,
aliasing);
libaroma_path_free(path);
return res;
} /* End of libaroma_draw_line_width */
/*
* Function : _libaroma_draw_arc_findpoint
* Return Value: byte
* Descriptions: find arc point
*/
byte _libaroma_draw_arc_findpoint(
LIBAROMA_PATHP path,
float dx, float dy,
float radius_w, float radius_h,
float xt0, float yt0,
float xt1, float yt1,
double start, double end
){
double radian;
if (start==end){
return 0;
}
else if (start<end){
radian = start + ((end - start) / 2.0);
}
else{
radian = end + ((start - end) / 2.0);
}
float xt = dx + radius_w*cos(radian);
float yt = dy + radius_h*sin(radian);
if ((abs(xt-xt0)>=2)||(abs(yt-yt0)>=2)) {
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
xt0, yt0, xt, yt,
start, radian
);
}
libaroma_path_add(path, xt, yt);
if ((abs(xt-xt1)>=2)||(abs(yt-yt1)>=2)) {
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
xt, yt, xt1, yt1,
radian, end
);
}
libaroma_path_add(path, xt1, yt1);
return 1;
} /* End of _libaroma_draw_arc_findpoint */
/*
* Function : libaroma_draw_arc
* Return Value: byte
* Descriptions: draw arc into canvas
*/
byte libaroma_draw_arc(
LIBAROMA_CANVASP dest,
float dx, float dy,
float radius_w, float radius_h,
float width,
float start_angle, float end_angle,
word color,byte alpha,byte is_mask,float aliasing
){
if (!dest){
dest=libaroma_fb()->canvas;
}
if ((is_mask)&&(dest->alpha==NULL)){
return 0;
}
if ((!is_mask)&&(alpha<1)){
return 1;
}
if (start_angle==end_angle){
/* no draw needed */
return 1;
}
/*
start_angle=fmod(start_angle,360);
end_angle=fmod(end_angle,360);
*/
/*
start_angle=360-start_angle;
end_angle=360-end_angle;
*/
if (start_angle>end_angle){
float tmp=start_angle;
start_angle=end_angle;
end_angle=tmp;
}
double start_radian = start_angle* __PI / 180.0;
double end_radian = end_angle * __PI / 180.0;
float start_x = dx + radius_w*cos(start_radian);
float start_y = dy + radius_h*sin(start_radian);
float end_x = dx + radius_w*cos(end_radian);
float end_y = dy + radius_h*sin(end_radian);
LIBAROMA_PATHP path=libaroma_path(start_x, start_y);
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
start_x, start_y, end_x, end_y,
start_radian, end_radian
);
libaroma_path_add(path, end_x, end_y);
if ((width>0)&&(width<radius_w/2)&&(width<radius_h/2)) {
radius_w -= width;
radius_h -= width;
/* roll */
start_x = dx + radius_w*cos(end_radian);
start_y = dy + radius_h*sin(end_radian);
end_x = dx + radius_w*cos(start_radian);
end_y = dy + radius_h*sin(start_radian);
libaroma_path_add(path, start_x, start_y);
_libaroma_draw_arc_findpoint(
path, dx, dy, radius_w, radius_h,
start_x, start_y, end_x, end_y,
end_radian, start_radian
);
}
byte res=libaroma_path_draw(
dest,
path,
color,
alpha,
is_mask,
aliasing);
libaroma_path_free(path);
return res;
} /* End of libaroma_draw_arc */
#endif /* __libaroma_commondraw_c__ */
|
diagsm_x_sky_n_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_Number diag[A->rows];
memset(diag, '\0', A->rows * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
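/* skyline storage keeps each row's entries up to and including the diagonal,
   so pointers[r+1]-1 indexes the diagonal element of row r */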
for (ALPHA_INT r = 0; r < A->rows; r++)
{
const ALPHA_INT indx = A->pointers[r + 1] - 1;
diag[r] = A->values[indx];
}
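/* diagonal solve, column by column: y = (alpha * x) / D */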
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT c = 0; c < columns; ++c)
{
for (ALPHA_INT r = 0; r < A->rows; ++r)
{
ALPHA_Number t;
alpha_mul(t, alpha, x[index2(c, r, ldx)]);
alpha_div(y[index2(c, r, ldy)], t, diag[r]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
Sqrt.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Sqrt.c"
#else
static int nn_(Sqrt_updateOutput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
real bias = luaT_getfieldchecknumber(L,1,"eps");
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
THTensor_(resizeAs)(output, input);
if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
{
TH_TENSOR_APPLY2(real, output, real, input, \
*output_data = sqrt(*input_data + bias););
}
else
{
real* output_data = THTensor_(data)(output);
real* input_data = THTensor_(data)(input);
long i;
#pragma omp parallel for private(i)
for(i = 0; i < THTensor_(nElement)(input); i++)
output_data[i] = sqrt(input_data[i] + bias);
}
return 1;
}
static int nn_(Sqrt_updateGradInput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
THTensor_(resizeAs)(gradInput, input);
if (output->nDimension == 1 ||
!THTensor_(isContiguous)(output) ||
!THTensor_(isContiguous)(gradOutput) ||
!THTensor_(isContiguous)(gradInput))
{
TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, \
*gradInput_data = ((*output_data == 0.0) ? 0.0 : \
(0.5 * (*gradOutput_data / *output_data))););
}
else
{
real* gradOutput_data = THTensor_(data)(gradOutput);
real* gradInput_data = THTensor_(data)(gradInput);
real* output_data = THTensor_(data)(output);
long i;
#pragma omp parallel for private(i)
for(i = 0; i < THTensor_(nElement)(output); i++)
if (output_data[i] == 0.0) {
gradInput_data[i] = 0.0;
} else {
gradInput_data[i] = 0.5 * (gradOutput_data[i] / output_data[i]);
}
}
return 1;
}
static const struct luaL_Reg nn_(Sqrt__) [] = {
{"Sqrt_updateOutput", nn_(Sqrt_updateOutput)},
{"Sqrt_updateGradInput", nn_(Sqrt_updateGradInput)},
{NULL, NULL}
};
static void nn_(Sqrt_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, nn_(Sqrt__), "nn");
lua_pop(L,1);
}
#endif
|
GrB_BinaryOp_wait.c | //------------------------------------------------------------------------------
// GrB_BinaryOp_wait: wait for a user-defined GrB_BinaryOp to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GrB_BinaryOp has no pending
// operations to wait for. All this method does is verify that the op is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GrB_BinaryOp_wait // no work, just check if the GrB_BinaryOp is valid
(
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GrB_BinaryOp *op
#else
GrB_BinaryOp op,
GrB_WaitMode waitmode
#endif
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
#if (GxB_IMPLEMENTATION_MAJOR <= 5)
GB_WHERE1 ("GrB_BinaryOp_wait (&op)") ;
GB_RETURN_IF_NULL (op) ;
if (*op == GxB_IGNORE_DUP) return (GrB_SUCCESS) ; // nothing to do
GB_RETURN_IF_NULL_OR_FAULTY (*op) ;
#else
GB_WHERE1 ("GrB_BinaryOp_wait (op, waitmode)") ;
if (op == GxB_IGNORE_DUP) return (GrB_SUCCESS) ; // nothing to do
GB_RETURN_IF_NULL_OR_FAULTY (op) ;
#endif
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
#pragma omp flush
return (GrB_SUCCESS) ;
}
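//------------------------------------------------------------------------------
// usage sketch (illustrative, not part of this file): create a user-defined op
// and wait on it; the wait only validates the op and performs the flush above.
//
//  void my_plus (void *z, const void *x, const void *y)
//  { (*(double *) z) = (*(const double *) x) + (*(const double *) y) ; }
//  GrB_BinaryOp op ;
//  GrB_BinaryOp_new (&op, my_plus, GrB_FP64, GrB_FP64, GrB_FP64) ;
//  GrB_BinaryOp_wait (&op) ;   // pointer form for v5 and earlier
//------------------------------------------------------------------------------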
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* all four sizes are required; fail loudly instead of using them uninitialized */
if (argc <= 4) {
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
/* allocate roc2 once (it was previously malloc'd twice, leaking the first block) */
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
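/* Time-skewed, tiled loop nest emitted by PLUTO/CLooG (an interpretation of
 * the generated code, not a comment from the generator): t1 walks time tiles,
 * t2/t3/t4 walk the z/y/x spatial tiles, t5 is the time step within a tile,
 * and t6/t7/t8 are the skewed z/y/x points, hence the (-4*t5 + t_) index
 * recovery below. The t2 loop is the one distributed across threads. */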
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(12*t1+Ny+15,24)),floord(24*t2+Ny+11,24)),floord(24*t1-24*t2+Nz+Ny+13,24));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-6,8)),ceild(3*t1-14,16)),ceild(24*t2-Nz-51,64)),ceild(24*t3-Ny-51,64));t4<=min(min(min(min(floord(4*Nt+Nx-9,64),floord(12*t1+Nx+15,64)),floord(24*t2+Nx+11,64)),floord(24*t3+Nx+11,64)),floord(24*t1-24*t2+Nz+Nx+13,64));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),6*t3+4),16*t4+14);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5+1)%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)] =
(((2.0 * A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)]) - A[(t5+1)%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)])
+ (roc2[(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)]
* (((((coef0 * A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)])
+ (coef1 * (((((A[t5%2][(-4*t5+t6)-1][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+1][(-4*t5+t7)][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)-1][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+1][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-1]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+1])))
+ (coef2 * (((((A[t5%2][(-4*t5+t6)-2][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+2][(-4*t5+t7)][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)-2][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+2][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-2]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+2])))
+ (coef3 * (((((A[t5%2][(-4*t5+t6)-3][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+3][(-4*t5+t7)][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)-3][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+3][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-3]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+3])))
+ (coef4 * (((((A[t5%2][(-4*t5+t6)-4][(-4*t5+t7)][(-4*t5+t8)] + A[t5%2][(-4*t5+t6)+4][(-4*t5+t7)][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)-4][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)+4][(-4*t5+t8)]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)-4]) + A[t5%2][(-4*t5+t6)][(-4*t5+t7)][(-4*t5+t8)+4])))));
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
DRB071-targetparallelfor-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
use of omp target in the upstream benchmark: len is not mapped and should be
firstprivate within target. In this listing the target construct is absent;
each loop writes a distinct a[i] with i private, so there is no data race.
*/
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int len = 1000;
int a[len];
#pragma omp parallel for private(i)
for (i=0; i<len; i++)
a[i]= i;
#pragma omp parallel for private(i)
for (i=0;i< len;i++)
a[i]=a[i]+1;
for (i=0; i<len; i++)
printf("%d", a[i]);
return 0;
}
|
kmp_sch_simd_runtime_guided.c | // RUN: %libomp-compile
// RUN: env OMP_SCHEDULE=guided %libomp-run
// RUN: env OMP_SCHEDULE=guided,1 %libomp-run 1
// RUN: env OMP_SCHEDULE=guided,2 %libomp-run 2
// RUN: env OMP_SCHEDULE=dynamic %libomp-run
// RUN: env OMP_SCHEDULE=dynamic,1 %libomp-run 1
// RUN: env OMP_SCHEDULE=dynamic,2 %libomp-run 2
// RUN: env OMP_SCHEDULE=auto %libomp-run
// REQUIRES: openmp-4.5
// The test checks schedule(simd:runtime)
// in combination with OMP_SCHEDULE=guided[,chunk]
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#define delay() Sleep(1);
#define seten(a,b,c) _putenv_s((a),(b))
#else
#include <unistd.h>
#define delay() usleep(10);
#define seten(a,b,c) setenv((a),(b),(c))
#endif
#define UBOUND 100
#define SIMD_LEN 4
int err = 0;
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL.
enum sched {
kmp_sch_static_balanced_chunked = 45,
kmp_sch_guided_simd = 46,
kmp_sch_runtime_simd = 47,
};
typedef unsigned u32;
typedef long long i64;
typedef unsigned long long u64;
typedef struct {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
#ifdef __cplusplus
extern "C" {
#endif
int __kmpc_global_thread_num(id*);
void __kmpc_barrier(id*, int gtid);
void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int);
void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64);
int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*);
int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*);
#ifdef __cplusplus
} // extern "C"
#endif
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
// ---------------------------------------------------------------------------
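// Reference mapping (an assumption, not stated in this file): the dispatch
// calls in run_loop() below correspond roughly to a user loop compiled from
//   #pragma omp for schedule(simd:runtime) simdlen(SIMD_LEN)
//   for (int i = loop_lb; i <= loop_ub; i += loop_st) body(i);
// with OMP_SCHEDULE selecting the actual schedule at run time.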
void
run_loop(
int loop_lb, // Loop lower bound.
int loop_ub, // Loop upper bound.
int loop_st, // Loop stride.
int lchunk
) {
static int volatile loop_sync = 0;
int lb; // Chunk lower bound.
int ub; // Chunk upper bound.
int st; // Chunk stride.
int rc;
int nthreads = omp_get_num_threads();
int tid = omp_get_thread_num();
int gtid = __kmpc_global_thread_num(&loc);
int last;
int tc = (loop_ub - loop_lb) / loop_st + 1;
int ch;
int no_chunk = 0;
if (lchunk == 0) {
no_chunk = 1;
lchunk = 1;
}
ch = lchunk * SIMD_LEN;
#if _DEBUG > 1
printf("run_loop gtid %d tid %d (lb=%d, ub=%d, st=%d, ch=%d)\n",
gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, lchunk);
#endif
// Don't test degenerate cases that should have been discovered by codegen.
if (loop_st == 0)
return;
if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
return;
__kmpc_dispatch_init_4(&loc, gtid, kmp_sch_runtime_simd,
loop_lb, loop_ub, loop_st, SIMD_LEN);
{
// Every thread in the parallel region pulls chunks from the dispatcher here.
int chunk; // No of current chunk.
int last_ub; // Upper bound of the last processed chunk.
u64 cur; // Number of iterations in current chunk.
u64 max; // Max allowed iterations for current chunk.
int undersized = 0;
last_ub = loop_ub;
chunk = 0;
max = (loop_ub - loop_lb) / loop_st + 1;
// The first chunk can consume all iterations.
while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
++ chunk;
#if _DEBUG
printf("th %d: chunk=%d, lb=%d, ub=%d ch %d\n",
tid, chunk, (int)lb, (int)ub, (int)(ub-lb+1));
#endif
// Check if previous chunk (it is not the final chunk) is undersized.
if (undersized)
printf("Error with chunk %d, th %d, err %d\n", chunk, tid, ++err);
if (loop_st > 0) {
if (!(ub <= loop_ub))
printf("Error with ub %d, %d, ch %d, err %d\n",
(int)ub, (int)loop_ub, chunk, ++err);
if (!(lb <= ub))
printf("Error with bounds %d, %d, %d, err %d\n",
(int)lb, (int)ub, chunk, ++err);
} else {
if (!(ub >= loop_ub))
printf("Error with ub %d, %d, %d, err %d\n",
(int)ub, (int)loop_ub, chunk, ++err);
if (!(lb >= ub))
printf("Error with bounds %d, %d, %d, err %d\n",
(int)lb, (int)ub, chunk, ++err);
}; // if
// Stride should not change.
if (!(st == loop_st))
printf("Error with st %d, %d, ch %d, err %d\n",
(int)st, (int)loop_st, chunk, ++err);
cur = ( ub - lb ) / loop_st + 1;
// Guided scheduling uses FP computations, so current chunk may
// be a bit bigger (+1) than allowed maximum.
if (!( cur <= max + 1))
printf("Error with iter %d, %d, err %d\n", (int)cur, (int)max, ++err);
// Update maximum for the next chunk.
if (!last && cur % ch)
printf("Error with chunk %d, %d, ch %d, tid %d, err %d\n",
chunk, (int)cur, ch, tid, ++err);
if (last && !no_chunk && cur > ch && nthreads > 1)
printf("Error: too big last chunk %d (%d), tid %d, err %d\n",
(int)cur, ch, tid, ++err);
if (cur < max)
max = cur;
last_ub = ub;
undersized = (cur < ch);
#if _DEBUG > 1
if (last)
printf("under%d cur %d, ch %d, tid %d, ub %d, lb %d, st %d =======\n",
undersized,cur,ch,tid,ub,lb,loop_st);
#endif
} // while
// Must have the right last iteration index.
if (loop_st > 0) {
if (!(last_ub <= loop_ub))
printf("Error with last1 %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_ub, chunk, ++err);
if (last && !(last_ub + loop_st > loop_ub))
printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
} else {
if (!(last_ub >= loop_ub))
printf("Error with last1 %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_ub, chunk, ++err);
if (last && !(last_ub + loop_st < loop_ub))
printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
} // if
}
__kmpc_barrier(&loc, gtid);
} // run_loop
int main(int argc, char *argv[])
{
int chunk = 0;
if (argc > 1) {
// expect chunk size as a parameter
chunk = atoi(argv[1]);
}
#pragma omp parallel //num_threads(num_th)
run_loop(0, UBOUND, 1, chunk);
if (err) {
printf("failed, err = %d\n", err);
return 1;
} else {
printf("passed\n");
return 0;
}
}
|
omp_task_red_taskloop.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
int r;
int work(int k, int l)
{
return k + l + 1;
}
void bar(int i) {
#pragma omp taskgroup task_reduction(+:r)
{ int th_gen = omp_get_thread_num();
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
{
r += work(i, 0);
printf("executing task (%d, 0), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
}
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
{
r += work(i, 1);
printf("executing task (%d, 1), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
}
}
}
int foo() {
int i;
int th_gen = omp_get_thread_num();
#pragma omp taskgroup task_reduction(+:r)
{
bar(0);
}
printf("th %d passed bar0\n", th_gen);
#pragma omp taskloop reduction(+:r) firstprivate(th_gen)
for (i = 1; i < 4; ++i) {
bar(i);
printf("th %d (gen by th %d) passed bar%d in taskloop\n", omp_get_thread_num(), th_gen, i);
#pragma omp task in_reduction(+:r)
r += i;
}
return 0;
}
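// Both threads execute foo(): bar(0) adds work(0,0)+work(0,1) = (1)+(2); the
// taskloop runs bar(i) for i = 1..3, adding (i+1)+(i+2) each, plus tasks r += i.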
// res = 2*((1+2)+(2+3)+(3+4)+(4+5)+1+2+3) = 60
#define res 60
int main()
{
r = 0;
#pragma omp parallel num_threads(2)
foo();
if (r == res) {
return 0;
} else {
printf("error r = %d (!= %d)\n", r, res);
return 1;
}
}
|
ag.h | int algoritmoGenetico(int N, int p, int np, Chromo *Best, int prob, int numMaxGen, clock_t start)
{
int numthreads = 5;
omp_set_num_threads(numthreads);
int posminlocal;
int countGen = 0; // Generation counter
Chromo *parents = (Chromo *)malloc(sizeof(Chromo) * np);
Chromo *population = (Chromo *)malloc(sizeof(Chromo) * p);
reservaMemoria(population, parents, p, np, N);
int inicio, fin, idthread;
int Bestfitness = 100000;
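// Each thread evolves its own slice [inicio, fin) of the population; thread 0
// alone performs selection and crossover between barriers, and the global best
// is tracked inside a critical section.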
// TODO: add shared clauses
#pragma omp parallel private(idthread, inicio, fin, posminlocal) shared(population, countGen, Bestfitness, Best, parents)
{
idthread = omp_get_thread_num();
inicio = (idthread * (p / numthreads));
fin = inicio + (p / numthreads);
// printf("Start: %d End: %d\n",inicio,fin);
// Generate the initial population
InitConf(population, N, inicio, fin); // check
// Compute the fitness of the initial population
calFit(population, N, inicio, fin); // check
posminlocal = BuscaMin(population, inicio, fin);
#pragma omp critical
{
if (population[posminlocal].fitness < Bestfitness)
{
copyBest(Best, population[posminlocal], N);
Bestfitness = population[posminlocal].fitness;
}
}
while ((Bestfitness > 0) && (countGen < numMaxGen))
{
if (idthread == 0)
{
// Parent selection
selectChampionship(parents, population, N, p); // check
// Crossover
Crossover(parents, population, N, 0, np); // check
}
#pragma omp barrier
// Mutation
mutation(population, prob, N, inicio, fin);
// Fitness computation
calFit(population, N, inicio, fin);
// Sort (disabled)
// Insertion_sort(population, p);
posminlocal = BuscaMin(population, inicio, fin);
#pragma omp critical
{
if (population[posminlocal].fitness < Bestfitness)
{
copyBest(Best, population[posminlocal], N);
Bestfitness = population[posminlocal].fitness;
}
}
#pragma omp master
{
countGen++;
}
#pragma omp barrier
}
}
return countGen;
}
|
expected_output.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
//---------------------------------------------------------------------
// program SP
//---------------------------------------------------------------------
//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
//----------
// Class D:
//----------
//----------
// Class E:
//----------
struct anon_NAS_SP_c_78 {
double real;
double imag;
};
typedef struct anon_NAS_SP_c_78 dcomplex;
/*common /global/*/
int grid_points[3];
int nx2;
int ny2;
int nz2;
/*common /constants/*/
double tx1;
double tx2;
double tx3;
double ty1;
double ty2;
double ty3;
double tz1;
double tz2;
double tz3;
double dx1;
double dx2;
double dx3;
double dx4;
double dx5;
double dy1;
double dy2;
double dy3;
double dy4;
double dy5;
double dz1;
double dz2;
double dz3;
double dz4;
double dz5;
double dssp;
double dt;
double ce[5][13];
double dxmax;
double dymax;
double dzmax;
double xxcon1;
double xxcon2;
double xxcon3;
double xxcon4;
double xxcon5;
double dx1tx1;
double dx2tx1;
double dx3tx1;
double dx4tx1;
double dx5tx1;
double yycon1;
double yycon2;
double yycon3;
double yycon4;
double yycon5;
double dy1ty1;
double dy2ty1;
double dy3ty1;
double dy4ty1;
double dy5ty1;
double zzcon1;
double zzcon2;
double zzcon3;
double zzcon4;
double zzcon5;
double dz1tz1;
double dz2tz1;
double dz3tz1;
double dz4tz1;
double dz5tz1;
double dnxm1;
double dnym1;
double dnzm1;
double c1c2;
double c1c5;
double c3c4;
double c1345;
double conz1;
double c1;
double c2;
double c3;
double c4;
double c5;
double c4dssp;
double c5dssp;
double dtdssp;
double dttx1;
double bt;
double dttx2;
double dtty1;
double dtty2;
double dttz1;
double dttz2;
double c2dttx1;
double c2dtty1;
double c2dttz1;
double comz1;
double comz4;
double comz5;
double comz6;
double c3c4tx3;
double c3c4ty3;
double c3c4tz3;
double c2iv;
double con43;
double con16;
//---------------------------------------------------------------------
// To improve cache performance, grid dimensions padded by 1
// for even number sizes only
//---------------------------------------------------------------------
/*common /fields/*/
double u[36][37][37][5];
double us[36][37][37];
double vs[36][37][37];
double ws[36][37][37];
double qs[36][37][37];
double rho_i[36][37][37];
double speed[36][37][37];
double square[36][37][37];
double rhs[36][37][37][5];
double forcing[36][37][37][5];
//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
void initialize();
void lhsinit(int ni, int nj, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]);
void lhsinitj(int nj, int ni, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void ninvr();
void y_solve();
void pinvr();
void z_solve();
void tzetar();
void add();
void txinvr();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *Class, int *verified);
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified);
double start[64];
double elapsed[64];
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);
int main(int argc, char *argv[]) {
int i, niter, step, n3;
double mflops;
double t;
double tmax;
double trecs[16];
int verified;
char Class;
char *t_names[16];
printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - SP Benchmark\n\n");
niter = 400;
dt = 0.0015;
grid_points[0] = 36;
grid_points[1] = 36;
grid_points[2] = 36;
printf(" Size: %4dx%4dx%4d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Iterations: %4d dt: %10.6f\n", niter, dt);
printf("\n");
if((grid_points[0] > 36) || (grid_points[1] > 36) || (grid_points[2] > 36)) {
printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Problem size too big for compiled array sizes\n");
return 0;
}
nx2 = grid_points[0] - 2;
ny2 = grid_points[1] - 2;
nz2 = grid_points[2] - 2;
set_constants();
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 1; i <= 15; i++) {
timer_clear(i);
}
exact_rhs();
initialize();
//---------------------------------------------------------------------
// do one time step to touch all code, and reinitialize
//---------------------------------------------------------------------
adi();
initialize();
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 1; i <= 15; i++) {
timer_clear(i);
}
timer_start(1);
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
printf#267{printf(" Time step %4d\n", step)}
compute_rhs#270{compute_rhs()}
txinvr#271{txinvr()}
x_solve#272{x_solve()}
y_solve#273{y_solve()}
z_solve#274{z_solve()}
add#275{add()}
****************************************/
for(step = 1; step <= niter; step++) {
if((step % 20) == 0 || step == 1) {
printf(" Time step %4d\n", step);
}
adi();
}
timer_stop(1);
tmax = timer_read(1);
verify(niter, &Class, &verified);
if(tmax != 0.0) {
n3 = grid_points[0] * grid_points[1] * grid_points[2];
t = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
mflops = (881.174 * (double) n3 - 4683.91 * (t * t) + 11484.5 * t - 19272.4) * (double) niter / (tmax * 1000000.0);
}
else {
mflops = 0.0;
}
print_results("SP", Class, grid_points[0], grid_points[1], grid_points[2], niter, tmax, mflops, " floating point", verified);
int exitValue = verified ? 0 : 1;
return exitValue;
}
//---------------------------------------------------------------------
// this function computes the norm of the difference between the
// computed solution and the exact solution
//---------------------------------------------------------------------
void error_norm(double rms[5]) {
int i, j, k, m, d;
double xi;
double eta;
double zeta;
double u_exact[5];
double add;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rms[m] = 0.0;
}
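// Array-section reduction (OpenMP 4.5): each thread accumulates into a private
// rms[0:5], combined with + when the parallel loop finishes.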
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, add) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
// #pragma omp parallel for default(shared) private(j, i, m, eta, xi, add) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
// #pragma omp parallel for default(shared) private(i, m, xi, add) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
exact_solution(xi, eta, zeta, u_exact);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
add = u[k][j][i][m] - u_exact[m];
rms[m] = rms[m] + add * add;
}
}
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(d = 0; d < 3; d++) {
rms[m] = rms[m] / (double) (grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
void rhs_norm(double rms[5]) {
int i, j, k, d, m;
double add;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rms[m] = 0.0;
}
#pragma omp parallel for default(shared) private(k, j, i, m, add) firstprivate(nz2, ny2, nx2, rhs) reduction(+ : rms[:5])
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, m, add) firstprivate(ny2, nx2, k, rhs) reduction(+ : rms[:5])
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m, add) firstprivate(nx2, k, j, rhs) reduction(+ : rms[:5])
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
add = rhs[k][j][i][m];
rms[m] = rms[m] + add * add;
}
}
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(d = 0; d < 3; d++) {
rms[m] = rms[m] / (double) (grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//---------------------------------------------------------------------
void exact_rhs() {
double dtemp[5];
double xi;
double eta;
double zeta;
double dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
double ue[36][5];
double buf[36][5];
double q[36];
double cuf[36];
//---------------------------------------------------------------------
// initialize
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = 0.0;
}
}
}
}
//---------------------------------------------------------------------
// xi-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, dtpp, im1, ip1) firstprivate(dnzm1, dnym1, dnxm1, tx2, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(k = 1; k <= grid_points[2] - 2; k++) {
zeta = (double) k * dnzm1;
// #pragma omp parallel for default(shared) private(j, i, m, eta, xi, dtpp, im1, ip1) firstprivate(dnym1, dnxm1, zeta, tx2, k, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(j = 1; j <= grid_points[1] - 2; j++) {
eta = (double) j * dnym1;
// #pragma omp parallel for default(shared) private(i, m, xi, dtpp) firstprivate(dnxm1, zeta, eta, grid_points, ce, dtemp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] + buf[i][3] * ue[i][3]);
}
// #pragma omp parallel for default(shared) private(i, im1, ip1) firstprivate(tx2, k, j, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, grid_points, ue, q, buf, cuf)
for(i = 1; i <= grid_points[0] - 2; i++) {
im1 = i - 1;
ip1 = i + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - tx2 * (ue[ip1][1] - ue[im1][1]) + dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * ((ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1])) - (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) + xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) + dx2tx1 * (ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) + xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) + dx3tx1 * (ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tx2 * (ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) + xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) + dx4tx1 * (ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tx2 * (buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) - buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] + buf[im1][0]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) + dx5tx1 * (ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
i = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
i = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
// #pragma omp parallel for default(shared) private(i, m) firstprivate(dssp, k, j, grid_points, ue)
for(i = 3; i <= grid_points[0] - 4; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
i = grid_points[0] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);
i = grid_points[0] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);
}
}
}
//---------------------------------------------------------------------
// eta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, i, j, m, zeta, xi, eta, dtpp, jm1, jp1) firstprivate(dnzm1, dnxm1, dnym1, ty2, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(k = 1; k <= grid_points[2] - 2; k++) {
zeta = (double) k * dnzm1;
// #pragma omp parallel for default(shared) private(i, j, m, xi, eta, dtpp, jm1, jp1) firstprivate(dnxm1, dnym1, zeta, ty2, k, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(i = 1; i <= grid_points[0] - 2; i++) {
xi = (double) i * dnxm1;
// #pragma omp parallel for default(shared) private(j, m, eta, dtpp) firstprivate(dnym1, zeta, xi, grid_points, ce, dtemp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[j][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] + buf[j][3] * ue[j][3]);
}
// #pragma omp parallel for default(shared) private(j, jm1, jp1) firstprivate(ty2, k, i, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, grid_points, ue, buf, q, cuf)
for(j = 1; j <= grid_points[1] - 2; j++) {
jm1 = j - 1;
jp1 = j + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - ty2 * (ue[jp1][2] - ue[jm1][2]) + dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - ty2 * (ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) + yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) + dy2ty1 * (ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - ty2 * ((ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1])) - (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) + yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) + dy3ty1 * (ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - ty2 * (ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) + yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) + dy4ty1 * (ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - ty2 * (buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) - buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] + buf[jm1][0]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) + dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
j = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
j = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
// #pragma omp parallel for default(shared) private(j, m) firstprivate(dssp, k, i, grid_points, ue)
for(j = 3; j <= grid_points[1] - 4; j++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
j = grid_points[1] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);
j = grid_points[1] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);
}
}
}
//---------------------------------------------------------------------
// zeta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, i, k, m, eta, xi, zeta, dtpp, km1, kp1) firstprivate(dnym1, dnxm1, dnzm1, tz2, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(j = 1; j <= grid_points[1] - 2; j++) {
eta = (double) j * dnym1;
// #pragma omp parallel for default(shared) private(i, k, m, xi, zeta, dtpp, km1, kp1) firstprivate(dnxm1, dnzm1, eta, tz2, j, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(i = 1; i <= grid_points[0] - 2; i++) {
xi = (double) i * dnxm1;
// #pragma omp parallel for default(shared) private(k, m, zeta, dtpp) firstprivate(dnzm1, eta, xi, grid_points, ce, dtemp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[k][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] + buf[k][3] * ue[k][3]);
}
// #pragma omp parallel for default(shared) private(k, km1, kp1) firstprivate(tz2, j, i, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, grid_points, ue, buf, q, cuf)
for(k = 1; k <= grid_points[2] - 2; k++) {
km1 = k - 1;
kp1 = k + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - tz2 * (ue[kp1][3] - ue[km1][3]) + dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) + zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) + dz2tz1 * (ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) + zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) + dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * ((ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1])) - (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) + zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) + dz4tz1 * (ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) - buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0] + buf[km1][0]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) + dz5tz1 * (ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
k = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
k = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
// #pragma omp parallel for default(shared) private(k, m) firstprivate(dssp, j, i, grid_points, ue)
for(k = 3; k <= grid_points[2] - 4; k++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
k = grid_points[2] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);
k = grid_points[2] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);
}
}
}
//---------------------------------------------------------------------
// now change the sign of the forcing function,
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 1; k <= grid_points[2] - 2; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m];
}
}
}
}
}
//---------------------------------------------------------------------
// this function returns the exact solution at point xi, eta, zeta
//---------------------------------------------------------------------
void exact_solution(double xi, double eta, double zeta, double dtemp[5]) {
int m;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
dtemp[m] = ce[m][0] + xi * (ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10]))) + eta * (ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11]))) + zeta * (ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] + zeta * ce[m][12])));
}
}
void adi() {
compute_rhs();
txinvr();
x_solve();
y_solve();
z_solve();
add();
}
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
void add() {
int i, j, k, m;
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(nz2, ny2, nx2, rhs)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, rhs)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, rhs)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
}
}
}
}
}
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize() {
int i, j, k, m, ix, iy, iz;
double xi;
double eta;
double zeta;
double Pface[2][3][5];
double Pxi;
double Peta;
double Pzeta;
double temp[5];
//---------------------------------------------------------------------
// Later (in compute_rhs) we compute 1/u for every element. A few of
// the corner elements are not used, but it convenient (and faster)
// to compute the whole thing with a simple loop. Make sure those
// values are nonzero by initializing the whole thing here.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i) firstprivate(grid_points)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i) firstprivate(k, grid_points)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(k, j, grid_points)
for(i = 0; i <= grid_points[0] - 1; i++) {
u[k][j][i][0] = 1.0;
u[k][j][i][1] = 0.0;
u[k][j][i][2] = 0.0;
u[k][j][i][3] = 0.0;
u[k][j][i][4] = 1.0;
}
}
}
//---------------------------------------------------------------------
// first store the "interpolated" values everywhere on the grid
//---------------------------------------------------------------------
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
exact_solution#787{exact_solution(Pxi, eta, zeta, &Pface[ix][0][0])}
exact_solution#793{exact_solution(xi, Peta, zeta, &Pface[iy][1][0])}
exact_solution#799{exact_solution(xi, eta, Pzeta, &Pface[iz][2][0])}
****************************************/
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
exact_solution#787{exact_solution(Pxi, eta, zeta, &Pface[ix][0][0])}
exact_solution#793{exact_solution(xi, Peta, zeta, &Pface[iy][1][0])}
exact_solution#799{exact_solution(xi, eta, Pzeta, &Pface[iz][2][0])}
****************************************/
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
exact_solution#787{exact_solution(Pxi, eta, zeta, &Pface[ix][0][0])}
exact_solution#793{exact_solution(xi, Peta, zeta, &Pface[iy][1][0])}
exact_solution#799{exact_solution(xi, eta, Pzeta, &Pface[iz][2][0])}
****************************************/
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(ix = 0; ix < 2; ix++) {
Pxi = (double) ix;
exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]);
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(iy = 0; iy < 2; iy++) {
Peta = (double) iy;
exact_solution(xi, Peta, zeta, &Pface[iy][1][0]);
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(iz = 0; iz < 2; iz++) {
Pzeta = (double) iz;
exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]);
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];
u[k][j][i][m] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta;
}
}
}
}
//---------------------------------------------------------------------
// now store the exact values on the boundaries
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// west face
//---------------------------------------------------------------------
xi = 0.0;
i = 0;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
// #pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// east face
//---------------------------------------------------------------------
xi = 1.0;
i = grid_points[0] - 1;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
// #pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// south face
//---------------------------------------------------------------------
eta = 0.0;
j = 0;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// north face
//---------------------------------------------------------------------
eta = 1.0;
j = grid_points[1] - 1;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// bottom face
//---------------------------------------------------------------------
zeta = 0.0;
k = 0;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// top face
//---------------------------------------------------------------------
zeta = 1.0;
k = grid_points[2] - 1;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
// #pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
}
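/*
 * Note on the interior initialization above: with Pxi, Peta, Pzeta the
 * one-dimensional interpolants between opposite faces, the m-loop computes
 *   u = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta
 * which is algebraically 1 - (1 - Pxi)*(1 - Peta)*(1 - Pzeta), i.e. a
 * boolean-sum blend of the three directions. A minimal standalone sketch of
 * the same blend for one scalar; blend3 and its arguments are illustrative
 * names, not benchmark symbols:
 */
static double blend3(double xi, double eta, double zeta,
                     double f_x0, double f_x1,  /* values on the xi = 0 and xi = 1 faces */
                     double f_y0, double f_y1,  /* values on the eta faces */
                     double f_z0, double f_z1)  /* values on the zeta faces */
{
  double px = xi * f_x1 + (1.0 - xi) * f_x0;
  double py = eta * f_y1 + (1.0 - eta) * f_y0;
  double pz = zeta * f_z1 + (1.0 - zeta) * f_z0;
  /* same inclusion-exclusion combination as the m-loop above */
  return px + py + pz - px * py - px * pz - py * pz + px * py * pz;
}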
void lhsinit(int ni, int nj, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]) {
int j, m;
//---------------------------------------------------------------------
// zap the whole left hand side for starters
// set all diagonal values to 1. This is overkill, but convenient
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, m) firstprivate(nj, ni)
for(j = 1; j <= nj; j++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
lhs[j][0][m] = 0.0;
lhsp[j][0][m] = 0.0;
lhsm[j][0][m] = 0.0;
lhs[j][ni][m] = 0.0;
lhsp[j][ni][m] = 0.0;
lhsm[j][ni][m] = 0.0;
}
lhs[j][0][2] = 1.0;
lhsp[j][0][2] = 1.0;
lhsm[j][0][2] = 1.0;
lhs[j][ni][2] = 1.0;
lhsp[j][ni][2] = 1.0;
lhsm[j][ni][2] = 1.0;
}
}
void lhsinitj(int nj, int ni, double lhs[37][37][5], double lhsp[37][37][5], double lhsm[37][37][5]) {
int i, m;
//---------------------------------------------------------------------
// zap the whole left hand side for starters
// set all diagonal values to 1. This is overkill, but convenient
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(i, m) firstprivate(ni, nj)
for(i = 1; i <= ni; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
lhs[0][i][m] = 0.0;
lhsp[0][i][m] = 0.0;
lhsm[0][i][m] = 0.0;
lhs[nj][i][m] = 0.0;
lhsp[nj][i][m] = 0.0;
lhsm[nj][i][m] = 0.0;
}
lhs[0][i][2] = 1.0;
lhsp[0][i][2] = 1.0;
lhsm[0][i][2] = 1.0;
lhs[nj][i][2] = 1.0;
lhsp[nj][i][2] = 1.0;
lhsm[nj][i][2] = 1.0;
}
}
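/*
 * In both initializers above, index 2 of the last dimension is the main
 * diagonal of a pentadiagonal row stored as
 *   [ a(i,i-2), a(i,i-1), a(i,i), a(i,i+1), a(i,i+2) ],
 * so writing (0, 0, 1, 0, 0) into the first and last rows pins the boundary
 * unknowns: those equations reduce to 1 * x = rhs. A minimal sketch of that
 * one row (band_identity_row is an illustrative name, not a benchmark symbol):
 */
static void band_identity_row(double row[5]) {
  int m;
  for (m = 0; m < 5; m++) row[m] = 0.0;
  row[2] = 1.0; /* main diagonal */
}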
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
void ninvr() {
int i, j, k;
double r1, r2, r3, r4, r5, t1, t2;
#pragma omp parallel for default(shared) private(k, j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nz2, ny2, nx2, bt)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(ny2, nx2, k, bt)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nx2, k, j, bt)
for(i = 1; i <= nx2; i++) {
r1 = rhs[k][j][i][0];
r2 = rhs[k][j][i][1];
r3 = rhs[k][j][i][2];
r4 = rhs[k][j][i][3];
r5 = rhs[k][j][i][4];
t1 = bt * r3;
t2 = 0.5 * (r4 + r5);
rhs[k][j][i][0] = -r2;
rhs[k][j][i][1] = r1;
rhs[k][j][i][2] = bt * (r4 - r5);
rhs[k][j][i][3] = -t1 + t2;
rhs[k][j][i][4] = t1 + t2;
}
}
}
}
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
void pinvr() {
int i, j, k;
double r1, r2, r3, r4, r5, t1, t2;
#pragma omp parallel for default(shared) private(k, j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nz2, ny2, nx2, bt)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, r1, r2, r3, r4, r5, t1, t2) firstprivate(ny2, nx2, k, bt)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, r1, r2, r3, r4, r5, t1, t2) firstprivate(nx2, k, j, bt)
for(i = 1; i <= nx2; i++) {
r1 = rhs[k][j][i][0];
r2 = rhs[k][j][i][1];
r3 = rhs[k][j][i][2];
r4 = rhs[k][j][i][3];
r5 = rhs[k][j][i][4];
t1 = bt * r1;
t2 = 0.5 * (r4 + r5);
rhs[k][j][i][0] = bt * (r4 - r5);
rhs[k][j][i][1] = -r3;
rhs[k][j][i][2] = r2;
rhs[k][j][i][3] = -t1 + t2;
rhs[k][j][i][4] = t1 + t2;
}
}
}
}
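/*
 * ninvr() and pinvr() above are hand-unrolled instances of one pattern:
 * multiply each 5-component rhs vector by a small constant matrix, using
 * scalar temporaries so every grid point is independent. A generic,
 * unoptimized sketch of that pattern (apply_block5 and mat5 are illustrative
 * names, not benchmark symbols):
 */
static void apply_block5(const double mat5[5][5], double v[5]) {
  double out[5];
  int r, c;
  for (r = 0; r < 5; r++) {
    out[r] = 0.0;
    for (c = 0; c < 5; c++) {
      out[r] += mat5[r][c] * v[c];
    }
  }
  for (r = 0; r < 5; r++) v[r] = out[r];
}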
void compute_rhs() {
int i, j, k, m;
double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
//---------------------------------------------------------------------
// compute the reciprocal of density, and the kinetic energy,
// and the speed of sound.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, rho_inv, aux) firstprivate(c1c2, grid_points, u)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i, rho_inv, aux) firstprivate(k, c1c2, grid_points, u)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i, rho_inv, aux) firstprivate(k, j, c1c2, grid_points, u)
for(i = 0; i <= grid_points[0] - 1; i++) {
rho_inv = 1.0 / u[k][j][i][0];
rho_i[k][j][i] = rho_inv;
us[k][j][i] = u[k][j][i][1] * rho_inv;
vs[k][j][i] = u[k][j][i][2] * rho_inv;
ws[k][j][i] = u[k][j][i][3] * rho_inv;
square[k][j][i] = 0.5 * (u[k][j][i][1] * u[k][j][i][1] + u[k][j][i][2] * u[k][j][i][2] + u[k][j][i][3] * u[k][j][i][3]) * rho_inv;
qs[k][j][i] = square[k][j][i] * rho_inv;
//-------------------------------------------------------------------
// (don't need speed and ainx until the lhs computation)
//-------------------------------------------------------------------
aux = c1c2 * rho_inv * (u[k][j][i][4] - square[k][j][i]);
speed[k][j][i] = sqrt(aux);
}
}
}
//---------------------------------------------------------------------
// copy the exact forcing term to the right hand side; because
// this forcing term is known, we can store it on the whole grid
// including the boundary
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, forcing)
for(k = 0; k <= grid_points[2] - 1; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points, forcing)
for(j = 0; j <= grid_points[1] - 1; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points, forcing)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = forcing[k][j][i][m];
}
}
}
}
//---------------------------------------------------------------------
// compute xi-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, uijk, up1, um1) firstprivate(nz2, ny2, nx2, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, dssp, us, u, square, vs, ws, qs, rho_i)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, uijk, up1, um1) firstprivate(ny2, nx2, k, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, us, u, square, vs, ws, qs, rho_i)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, uijk, up1, um1) firstprivate(nx2, k, j, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, us, u, square, vs, ws, qs, rho_i)
for(i = 1; i <= nx2; i++) {
uijk = us[k][j][i];
up1 = us[k][j][i + 1];
um1 = us[k][j][i - 1];
rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 * (u[k][j][i + 1][0] - 2.0 * u[k][j][i][0] + u[k][j][i - 1][0]) - tx2 * (u[k][j][i + 1][1] - u[k][j][i - 1][1]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 * (u[k][j][i + 1][1] - 2.0 * u[k][j][i][1] + u[k][j][i - 1][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[k][j][i + 1][1] * up1 - u[k][j][i - 1][1] * um1 + (u[k][j][i + 1][4] - square[k][j][i + 1] - u[k][j][i - 1][4] + square[k][j][i - 1]) * c2);
rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 * (u[k][j][i + 1][2] - 2.0 * u[k][j][i][2] + u[k][j][i - 1][2]) + xxcon2 * (vs[k][j][i + 1] - 2.0 * vs[k][j][i] + vs[k][j][i - 1]) - tx2 * (u[k][j][i + 1][2] * up1 - u[k][j][i - 1][2] * um1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 * (u[k][j][i + 1][3] - 2.0 * u[k][j][i][3] + u[k][j][i - 1][3]) + xxcon2 * (ws[k][j][i + 1] - 2.0 * ws[k][j][i] + ws[k][j][i - 1]) - tx2 * (u[k][j][i + 1][3] * up1 - u[k][j][i - 1][3] * um1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 * (u[k][j][i + 1][4] - 2.0 * u[k][j][i][4] + u[k][j][i - 1][4]) + xxcon3 * (qs[k][j][i + 1] - 2.0 * qs[k][j][i] + qs[k][j][i - 1]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[k][j][i + 1][4] * rho_i[k][j][i + 1] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j][i - 1][4] * rho_i[k][j][i - 1]) - tx2 * ((c1 * u[k][j][i + 1][4] - c2 * square[k][j][i + 1]) * up1 - (c1 * u[k][j][i - 1][4] - c2 * square[k][j][i - 1]) * um1);
}
}
//---------------------------------------------------------------------
// add fourth order xi-direction dissipation
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, m, i) firstprivate(ny2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
i = 1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
i = 2;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
}
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u)
for(i = 3; i <= nx2 - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
}
}
// #pragma omp parallel for default(shared) private(j, m, i) firstprivate(ny2, nx2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
i = nx2 - 1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m]);
}
i = nx2;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 5.0 * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// compute eta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, vijk, vp1, vm1) firstprivate(nz2, ny2, nx2, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, dssp, vs, u, us, square, ws, qs, rho_i)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, vijk, vp1, vm1) firstprivate(ny2, nx2, k, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, vs, u, us, square, ws, qs, rho_i)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, vijk, vp1, vm1) firstprivate(nx2, k, j, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, vs, u, us, square, ws, qs, rho_i)
for(i = 1; i <= nx2; i++) {
vijk = vs[k][j][i];
vp1 = vs[k][j + 1][i];
vm1 = vs[k][j - 1][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 * (u[k][j + 1][i][0] - 2.0 * u[k][j][i][0] + u[k][j - 1][i][0]) - ty2 * (u[k][j + 1][i][2] - u[k][j - 1][i][2]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 * (u[k][j + 1][i][1] - 2.0 * u[k][j][i][1] + u[k][j - 1][i][1]) + yycon2 * (us[k][j + 1][i] - 2.0 * us[k][j][i] + us[k][j - 1][i]) - ty2 * (u[k][j + 1][i][1] * vp1 - u[k][j - 1][i][1] * vm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 * (u[k][j + 1][i][2] - 2.0 * u[k][j][i][2] + u[k][j - 1][i][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[k][j + 1][i][2] * vp1 - u[k][j - 1][i][2] * vm1 + (u[k][j + 1][i][4] - square[k][j + 1][i] - u[k][j - 1][i][4] + square[k][j - 1][i]) * c2);
rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 * (u[k][j + 1][i][3] - 2.0 * u[k][j][i][3] + u[k][j - 1][i][3]) + yycon2 * (ws[k][j + 1][i] - 2.0 * ws[k][j][i] + ws[k][j - 1][i]) - ty2 * (u[k][j + 1][i][3] * vp1 - u[k][j - 1][i][3] * vm1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 * (u[k][j + 1][i][4] - 2.0 * u[k][j][i][4] + u[k][j - 1][i][4]) + yycon3 * (qs[k][j + 1][i] - 2.0 * qs[k][j][i] + qs[k][j - 1][i]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[k][j + 1][i][4] * rho_i[k][j + 1][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j - 1][i][4] * rho_i[k][j - 1][i]) - ty2 * ((c1 * u[k][j + 1][i][4] - c2 * square[k][j + 1][i]) * vp1 - (c1 * u[k][j - 1][i][4] - c2 * square[k][j - 1][i]) * vm1);
}
}
//---------------------------------------------------------------------
// add fourth order eta-direction dissipation
//---------------------------------------------------------------------
j = 1;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
j = 2;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u)
for(j = 3; j <= ny2 - 2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
}
j = ny2 - 1;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, ny2, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m]);
}
}
j = ny2;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, j, k, ny2, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 5.0 * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// compute zeta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, wijk, wp1, wm1) firstprivate(nz2, ny2, nx2, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, ws, u, us, vs, square, qs, rho_i)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, wijk, wp1, wm1) firstprivate(ny2, nx2, k, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, ws, u, us, vs, square, qs, rho_i)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, wijk, wp1, wm1) firstprivate(nx2, k, j, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, ws, u, us, vs, square, qs, rho_i)
for(i = 1; i <= nx2; i++) {
wijk = ws[k][j][i];
wp1 = ws[k + 1][j][i];
wm1 = ws[k - 1][j][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 * (u[k + 1][j][i][0] - 2.0 * u[k][j][i][0] + u[k - 1][j][i][0]) - tz2 * (u[k + 1][j][i][3] - u[k - 1][j][i][3]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 * (u[k + 1][j][i][1] - 2.0 * u[k][j][i][1] + u[k - 1][j][i][1]) + zzcon2 * (us[k + 1][j][i] - 2.0 * us[k][j][i] + us[k - 1][j][i]) - tz2 * (u[k + 1][j][i][1] * wp1 - u[k - 1][j][i][1] * wm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 * (u[k + 1][j][i][2] - 2.0 * u[k][j][i][2] + u[k - 1][j][i][2]) + zzcon2 * (vs[k + 1][j][i] - 2.0 * vs[k][j][i] + vs[k - 1][j][i]) - tz2 * (u[k + 1][j][i][2] * wp1 - u[k - 1][j][i][2] * wm1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 * (u[k + 1][j][i][3] - 2.0 * u[k][j][i][3] + u[k - 1][j][i][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[k + 1][j][i][3] * wp1 - u[k - 1][j][i][3] * wm1 + (u[k + 1][j][i][4] - square[k + 1][j][i] - u[k - 1][j][i][4] + square[k - 1][j][i]) * c2);
rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 * (u[k + 1][j][i][4] - 2.0 * u[k][j][i][4] + u[k - 1][j][i][4]) + zzcon3 * (qs[k + 1][j][i] - 2.0 * qs[k][j][i] + qs[k - 1][j][i]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[k + 1][j][i][4] * rho_i[k + 1][j][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k - 1][j][i][4] * rho_i[k - 1][j][i]) - tz2 * ((c1 * u[k + 1][j][i][4] - c2 * square[k + 1][j][i]) * wp1 - (c1 * u[k - 1][j][i][4] - c2 * square[k - 1][j][i]) * wm1);
}
}
}
//---------------------------------------------------------------------
// add fourth order zeta-direction dissipation
//---------------------------------------------------------------------
k = 1;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
k = 2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(nz2, ny2, nx2, dssp, u)
for(k = 3; k <= nz2 - 2; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
}
k = nz2 - 1;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m]);
}
}
}
k = nz2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dssp, u)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dssp, u)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 5.0 * u[k][j][i][m]);
}
}
}
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(nz2, ny2, nx2, dt)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, m) firstprivate(ny2, nx2, k, dt)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, dt)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] * dt;
}
}
}
}
}
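/*
 * Each "add fourth order ... dissipation" block in compute_rhs() applies the
 * interior stencil (1, -4, 6, -4, 1) and shortened one-sided closures at the
 * two interior points nearest each boundary. A 1-D sketch of the same stencil
 * logic, assuming n >= 7 points with fixed endpoints v[0] and v[n-1]
 * (dissip1d is an illustrative name, not a benchmark symbol):
 */
static void dissip1d(double *r, const double *v, int n, double dssp) {
  int i;
  /* one-sided closures at the first two interior points */
  r[1] -= dssp * (5.0 * v[1] - 4.0 * v[2] + v[3]);
  r[2] -= dssp * (-4.0 * v[1] + 6.0 * v[2] - 4.0 * v[3] + v[4]);
  /* full five-point stencil in the interior */
  for (i = 3; i <= n - 4; i++) {
    r[i] -= dssp * (v[i - 2] - 4.0 * v[i - 1] + 6.0 * v[i] - 4.0 * v[i + 1] + v[i + 2]);
  }
  /* one-sided closures at the last two interior points */
  r[n - 3] -= dssp * (v[n - 5] - 4.0 * v[n - 4] + 6.0 * v[n - 3] - 4.0 * v[n - 2]);
  r[n - 2] -= dssp * (v[n - 4] - 4.0 * v[n - 3] + 5.0 * v[n - 2]);
}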
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
void txinvr() {
int i, j, k;
double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3, r4, r5, ac2inv;
#pragma omp parallel for default(shared) private(k, j, i, ru1, uu, vv, ww, ac, ac2inv, r1, r2, r3, r4, r5, t1, t2, t3) firstprivate(nz2, ny2, nx2, c2, bt, rho_i, us, vs, ws, speed, qs)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, ru1, uu, vv, ww, ac, ac2inv, r1, r2, r3, r4, r5, t1, t2, t3) firstprivate(ny2, nx2, k, c2, bt, rho_i, us, vs, ws, speed, qs)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, ru1, uu, vv, ww, ac, ac2inv, r1, r2, r3, r4, r5, t1, t2, t3) firstprivate(nx2, k, j, c2, bt, rho_i, us, vs, ws, speed, qs)
for(i = 1; i <= nx2; i++) {
ru1 = rho_i[k][j][i];
uu = us[k][j][i];
vv = vs[k][j][i];
ww = ws[k][j][i];
ac = speed[k][j][i];
ac2inv = ac * ac;
r1 = rhs[k][j][i][0];
r2 = rhs[k][j][i][1];
r3 = rhs[k][j][i][2];
r4 = rhs[k][j][i][3];
r5 = rhs[k][j][i][4];
t1 = c2 / ac2inv * (qs[k][j][i] * r1 - uu * r2 - vv * r3 - ww * r4 + r5);
t2 = bt * ru1 * (uu * r1 - r2);
t3 = (bt * ru1 * ac) * t1;
rhs[k][j][i][0] = r1 - t1;
rhs[k][j][i][1] = -ru1 * (ww * r1 - r4);
rhs[k][j][i][2] = ru1 * (vv * r1 - r3);
rhs[k][j][i][3] = -t2 + t3;
rhs[k][j][i][4] = t2 + t3;
}
}
}
}
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
void tzetar() {
int i, j, k;
double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5;
double btuz, ac2u, uzik1;
#pragma omp parallel for default(shared) private(k, j, i, xvel, yvel, zvel, ac, ac2u, r1, r2, r3, r4, r5, uzik1, btuz, t1, t2, t3) firstprivate(nz2, ny2, nx2, bt, c2iv, us, vs, ws, speed, u, qs)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(j, i, xvel, yvel, zvel, ac, ac2u, r1, r2, r3, r4, r5, uzik1, btuz, t1, t2, t3) firstprivate(ny2, nx2, k, bt, c2iv, us, vs, ws, speed, u, qs)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, xvel, yvel, zvel, ac, ac2u, r1, r2, r3, r4, r5, uzik1, btuz, t1, t2, t3) firstprivate(nx2, k, j, bt, c2iv, us, vs, ws, speed, u, qs)
for(i = 1; i <= nx2; i++) {
xvel = us[k][j][i];
yvel = vs[k][j][i];
zvel = ws[k][j][i];
ac = speed[k][j][i];
ac2u = ac * ac;
r1 = rhs[k][j][i][0];
r2 = rhs[k][j][i][1];
r3 = rhs[k][j][i][2];
r4 = rhs[k][j][i][3];
r5 = rhs[k][j][i][4];
uzik1 = u[k][j][i][0];
btuz = bt * uzik1;
t1 = btuz / ac * (r4 + r5);
t2 = r3 + t1;
t3 = btuz * (r4 - r5);
rhs[k][j][i][0] = t2;
rhs[k][j][i][1] = -uzik1 * r2 + xvel * t2;
rhs[k][j][i][2] = uzik1 * r1 + yvel * t2;
rhs[k][j][i][3] = zvel * t2 + t3;
rhs[k][j][i][4] = uzik1 * (-xvel * r2 + yvel * r1) + qs[k][j][i] * t2 + c2iv * ac2u * t1 + zvel * t3;
}
}
}
}
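/*
 * txinvr() and tzetar() (like ninvr() and pinvr()) read and write only the
 * rhs 5-vector at their own (k, j, i), which is why a single parallel-for
 * over k is race-free. A minimal sketch of that pointwise-update shape with
 * a collapsed loop nest -- an alternative scheduling shown for illustration,
 * not what the tool emitted above (C99 variable-length array parameter;
 * pointwise_scale is an illustrative name):
 */
static void pointwise_scale(int nk, int nj, int ni,
                            double f[nk][nj][ni][5], double s) {
  int k, j, i, m;
  #pragma omp parallel for collapse(2) private(i, m)
  for (k = 0; k < nk; k++) {
    for (j = 0; j < nj; j++) {
      for (i = 0; i < ni; i++) {
        for (m = 0; m < 5; m++) {
          f[k][j][i][m] *= s; /* touches only this (k, j, i) */
        }
      }
    }
  }
}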
//---------------------------------------------------------------------
// verification routine
//---------------------------------------------------------------------
void verify(int no_time_steps, char *Class, int *verified) {
double xcrref[5];
double xceref[5];
double xcrdif[5];
double xcedif[5];
double epsilon;
double xce[5];
double xcr[5];
double dtref = 0.0;
int m;
//---------------------------------------------------------------------
// tolerance level
//---------------------------------------------------------------------
epsilon = 1.0e-08;
//---------------------------------------------------------------------
// compute the error norm and the residual norm, and exit if not printing
//---------------------------------------------------------------------
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
*Class = 'U';
*verified = 1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
//---------------------------------------------------------------------
// reference data for 12X12X12 grids after 100 time steps,
// with DT = 1.50e-02
//---------------------------------------------------------------------
if((grid_points[0] == 12) && (grid_points[1] == 12) && (grid_points[2] == 12) && (no_time_steps == 100)) {
*Class = 'S';
dtref = 1.5e-2;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 2.7470315451339479e-02;
xcrref[1] = 1.0360746705285417e-02;
xcrref[2] = 1.6235745065095532e-02;
xcrref[3] = 1.5840557224455615e-02;
xcrref[4] = 3.4849040609362460e-02;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 2.7289258557377227e-05;
xceref[1] = 1.0364446640837285e-05;
xceref[2] = 1.6154798287166471e-05;
xceref[3] = 1.5750704994480102e-05;
xceref[4] = 3.4177666183390531e-05;
//---------------------------------------------------------------------
// reference data for 36X36X36 grids after 400 time steps,
// with DT = 1.5e-03
//---------------------------------------------------------------------
}
else if((grid_points[0] == 36) && (grid_points[1] == 36) && (grid_points[2] == 36) && (no_time_steps == 400)) {
*Class = 'W';
dtref = 1.5e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.1893253733584e-02;
xcrref[1] = 0.1717075447775e-03;
xcrref[2] = 0.2778153350936e-03;
xcrref[3] = 0.2887475409984e-03;
xcrref[4] = 0.3143611161242e-02;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.7542088599534e-04;
xceref[1] = 0.6512852253086e-05;
xceref[2] = 0.1049092285688e-04;
xceref[3] = 0.1128838671535e-04;
xceref[4] = 0.1212845639773e-03;
//---------------------------------------------------------------------
// reference data for 64X64X64 grids after 400 time steps,
// with DT = 1.5e-03
//---------------------------------------------------------------------
}
else if((grid_points[0] == 64) && (grid_points[1] == 64) && (grid_points[2] == 64) && (no_time_steps == 400)) {
*Class = 'A';
dtref = 1.5e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 2.4799822399300195;
xcrref[1] = 1.1276337964368832;
xcrref[2] = 1.5028977888770491;
xcrref[3] = 1.4217816211695179;
xcrref[4] = 2.1292113035138280;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 1.0900140297820550e-04;
xceref[1] = 3.7343951769282091e-05;
xceref[2] = 5.0092785406541633e-05;
xceref[3] = 4.7671093939528255e-05;
xceref[4] = 1.3621613399213001e-04;
//---------------------------------------------------------------------
// reference data for 102X102X102 grids after 400 time steps,
// with DT = 1.0e-03
//---------------------------------------------------------------------
}
else if((grid_points[0] == 102) && (grid_points[1] == 102) && (grid_points[2] == 102) && (no_time_steps == 400)) {
*Class = 'B';
dtref = 1.0e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.6903293579998e+02;
xcrref[1] = 0.3095134488084e+02;
xcrref[2] = 0.4103336647017e+02;
xcrref[3] = 0.3864769009604e+02;
xcrref[4] = 0.5643482272596e+02;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.9810006190188e-02;
xceref[1] = 0.1022827905670e-02;
xceref[2] = 0.1720597911692e-02;
xceref[3] = 0.1694479428231e-02;
xceref[4] = 0.1847456263981e-01;
//---------------------------------------------------------------------
// reference data for 162X162X162 grids after 400 time steps,
// with DT = 0.67e-03
//---------------------------------------------------------------------
}
else if((grid_points[0] == 162) && (grid_points[1] == 162) && (grid_points[2] == 162) && (no_time_steps == 400)) {
*Class = 'C';
dtref = 0.67e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.5881691581829e+03;
xcrref[1] = 0.2454417603569e+03;
xcrref[2] = 0.3293829191851e+03;
xcrref[3] = 0.3081924971891e+03;
xcrref[4] = 0.4597223799176e+03;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.2598120500183e+00;
xceref[1] = 0.2590888922315e-01;
xceref[2] = 0.5132886416320e-01;
xceref[3] = 0.4806073419454e-01;
xceref[4] = 0.5483377491301e+00;
//---------------------------------------------------------------------
// reference data for 408X408X408 grids after 500 time steps,
// with DT = 0.3e-03
//---------------------------------------------------------------------
}
else if((grid_points[0] == 408) && (grid_points[1] == 408) && (grid_points[2] == 408) && (no_time_steps == 500)) {
*Class = 'D';
dtref = 0.30e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.1044696216887e+05;
xcrref[1] = 0.3204427762578e+04;
xcrref[2] = 0.4648680733032e+04;
xcrref[3] = 0.4238923283697e+04;
xcrref[4] = 0.7588412036136e+04;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.5089471423669e+01;
xceref[1] = 0.5323514855894e+00;
xceref[2] = 0.1187051008971e+01;
xceref[3] = 0.1083734951938e+01;
xceref[4] = 0.1164108338568e+02;
//---------------------------------------------------------------------
// reference data for 1020X1020X1020 grids after 500 time steps,
// with DT = 0.1e-03
//---------------------------------------------------------------------
}
else if((grid_points[0] == 1020) && (grid_points[1] == 1020) && (grid_points[2] == 1020) && (no_time_steps == 500)) {
*Class = 'E';
dtref = 0.10e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.6255387422609e+05;
xcrref[1] = 0.1495317020012e+05;
xcrref[2] = 0.2347595750586e+05;
xcrref[3] = 0.2091099783534e+05;
xcrref[4] = 0.4770412841218e+05;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.6742735164909e+02;
xceref[1] = 0.5390656036938e+01;
xceref[2] = 0.1680647196477e+02;
xceref[3] = 0.1536963126457e+02;
xceref[4] = 0.1575330146156e+03;
}
else {
*verified = 0;
}
//---------------------------------------------------------------------
// verification test for residuals if gridsize is one of
// the defined grid sizes above (class != 'U')
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Compute the difference of solution values and the known reference values.
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]);
xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]);
}
//---------------------------------------------------------------------
// Output the comparison of computed results to known cases.
//---------------------------------------------------------------------
if(*Class != 'U') {
printf(" Verification being performed for class %c\n", *Class);
printf(" accuracy setting for epsilon = %20.13E\n", epsilon);
*verified = (fabs(dt - dtref) <= epsilon);
if(!(*verified)) {
*Class = 'U';
printf(" DT does not match the reference value of %15.8E\n", dtref);
}
}
else {
printf(" Unknown class\n");
}
if(*Class != 'U') {
printf(" Comparison of RMS-norms of residual\n");
}
else {
printf(" RMS-norms of residual\n");
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
if(*Class == 'U') {
printf(" %2d%20.13E\n", m + 1, xcr[m]);
}
else if(xcrdif[m] <= epsilon) {
printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]);
}
else {
*verified = 0;
printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]);
}
}
if(*Class != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
}
else {
printf(" RMS-norms of solution error\n");
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
if(*Class == 'U') {
printf(" %2d%20.13E\n", m + 1, xce[m]);
}
else if(xcedif[m] <= epsilon) {
printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]);
}
else {
*verified = 0;
printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]);
}
}
if(*Class == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
}
else if(*verified) {
printf(" Verification Successful\n");
}
else {
printf(" Verification failed\n");
}
}
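/*
 * The acceptance test in verify() is a componentwise relative-difference
 * check against the class reference norms. A standalone sketch of that test,
 * assuming <math.h> is already included as elsewhere in this file (rel_ok is
 * an illustrative name; eps plays the role of epsilon):
 */
static int rel_ok(const double *x, const double *xref, int n, double eps) {
  int m;
  for (m = 0; m < n; m++) {
    if (fabs((x[m] - xref[m]) / xref[m]) > eps) return 0; /* FAILURE case */
  }
  return 1; /* all components within tolerance */
}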
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the x-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the x-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
void x_solve() {
int i, j, k, i1, i2, m;
double ru1, fac1, fac2;
double rhon[36];
double cv[36];
double lhs[37][37][5];
double lhsp[37][37][5];
double lhsm[37][37][5];
#pragma omp parallel for default(shared) private(k, j, i, m, ru1, i1, i2, fac1, fac2) firstprivate(nz2, ny2, nx2, c3c4, con43, c1c5, dx2, dx5, dxmax, dx1, dttx2, dttx1, c2dttx1, comz5, comz4, comz1, comz6, grid_points, rho_i, us, speed, lhs, lhsp, lhsm, cv, rhon)
for(k = 1; k <= nz2; k++) {
lhsinit(nx2 + 1, ny2, lhs, lhsp, lhsm);
//---------------------------------------------------------------------
// Computes the left hand side for the three x-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, ru1) firstprivate(ny2, c3c4, k, con43, c1c5, dx2, dx5, dxmax, dx1, nx2, dttx2, dttx1, c2dttx1, grid_points, rho_i, us, cv, rhon)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i, ru1) firstprivate(c3c4, k, j, con43, c1c5, dx2, dx5, dxmax, dx1, grid_points, rho_i, us)
for(i = 0; i <= grid_points[0] - 1; i++) {
ru1 = c3c4 * rho_i[k][j][i];
cv[i] = us[k][j][i];
/* max of the four eigenvalue bounds; behavior-identical to the generated nested ternaries */
rhon[i] = fmax(fmax(dx2 + con43 * ru1, dx5 + c1c5 * ru1), fmax(dxmax + ru1, dx1));
}
// #pragma omp parallel for default(shared) private(i) firstprivate(nx2, j, dttx2, dttx1, c2dttx1, cv, rhon)
for(i = 1; i <= nx2; i++) {
lhs[j][i][0] = 0.0;
lhs[j][i][1] = -dttx2 * cv[i - 1] - dttx1 * rhon[i - 1];
lhs[j][i][2] = 1.0 + c2dttx1 * rhon[i];
lhs[j][i][3] = dttx2 * cv[i + 1] - dttx1 * rhon[i + 1];
lhs[j][i][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, comz5, comz4, comz1, comz6)
for(j = 1; j <= ny2; j++) {
i = 1;
lhs[j][i][2] = lhs[j][i][2] + comz5;
lhs[j][i][3] = lhs[j][i][3] - comz4;
lhs[j][i][4] = lhs[j][i][4] + comz1;
lhs[j][i + 1][1] = lhs[j][i + 1][1] - comz4;
lhs[j][i + 1][2] = lhs[j][i + 1][2] + comz6;
lhs[j][i + 1][3] = lhs[j][i + 1][3] - comz4;
lhs[j][i + 1][4] = lhs[j][i + 1][4] + comz1;
}
// #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, comz1, comz4, comz6, grid_points)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(j, comz1, comz4, comz6, grid_points)
for(i = 3; i <= grid_points[0] - 4; i++) {
lhs[j][i][0] = lhs[j][i][0] + comz1;
lhs[j][i][1] = lhs[j][i][1] - comz4;
lhs[j][i][2] = lhs[j][i][2] + comz6;
lhs[j][i][3] = lhs[j][i][3] - comz4;
lhs[j][i][4] = lhs[j][i][4] + comz1;
}
}
// #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, comz1, comz4, comz6, comz5, grid_points)
for(j = 1; j <= ny2; j++) {
i = grid_points[0] - 3;
lhs[j][i][0] = lhs[j][i][0] + comz1;
lhs[j][i][1] = lhs[j][i][1] - comz4;
lhs[j][i][2] = lhs[j][i][2] + comz6;
lhs[j][i][3] = lhs[j][i][3] - comz4;
lhs[j][i + 1][0] = lhs[j][i + 1][0] + comz1;
lhs[j][i + 1][1] = lhs[j][i + 1][1] - comz4;
lhs[j][i + 1][2] = lhs[j][i + 1][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, fill the other factors (u+c), (u-c) by adding to
// the first
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i) firstprivate(ny2, nx2, dttx2, k, lhs, speed)
for(j = 1; j <= ny2; j++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(nx2, j, dttx2, k, lhs, speed)
for(i = 1; i <= nx2; i++) {
lhsp[j][i][0] = lhs[j][i][0];
lhsp[j][i][1] = lhs[j][i][1] - dttx2 * speed[k][j][i - 1];
lhsp[j][i][2] = lhs[j][i][2];
lhsp[j][i][3] = lhs[j][i][3] + dttx2 * speed[k][j][i + 1];
lhsp[j][i][4] = lhs[j][i][4];
lhsm[j][i][0] = lhs[j][i][0];
lhsm[j][i][1] = lhs[j][i][1] + dttx2 * speed[k][j][i - 1];
lhsm[j][i][2] = lhs[j][i][2];
lhsm[j][i][3] = lhs[j][i][3] - dttx2 * speed[k][j][i + 1];
lhsm[j][i][4] = lhs[j][i][4];
}
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// perform the Thomas algorithm; first, FORWARD ELIMINATION
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, m, i1, i2, fac1) firstprivate(ny2, k, grid_points)
for(j = 1; j <= ny2; j++) {
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RWR
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(i = 0; i <= grid_points[0] - 3; i++) {
i1 = i + 1;
i2 = i + 2;
fac1 = 1.0 / lhs[j][i][2];
lhs[j][i][3] = fac1 * lhs[j][i][3];
lhs[j][i][4] = fac1 * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
}
lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1] * lhs[j][i][3];
lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1] * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhs[j][i1][1] * rhs[k][j][i][m];
}
lhs[j][i2][1] = lhs[j][i2][1] - lhs[j][i2][0] * lhs[j][i][3];
lhs[j][i2][2] = lhs[j][i2][2] - lhs[j][i2][0] * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhs[j][i2][0] * rhs[k][j][i][m];
}
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, m, i, i1, fac1, fac2) firstprivate(ny2, k, grid_points)
for(j = 1; j <= ny2; j++) {
i = grid_points[0] - 2;
i1 = grid_points[0] - 1;
fac1 = 1.0 / lhs[j][i][2];
lhs[j][i][3] = fac1 * lhs[j][i][3];
lhs[j][i][4] = fac1 * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
}
lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1] * lhs[j][i][3];
lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1] * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhs[j][i1][1] * rhs[k][j][i][m];
}
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac2 = 1.0 / lhs[j][i1][2];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i1][m] = fac2 * rhs[k][j][i1][m];
}
}
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, i1, i2, m, fac1) firstprivate(ny2, k, grid_points)
for(j = 1; j <= ny2; j++) {
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhsp use : RWR
unsolved dependency for arrayAccess rhs use : RW
unsolved dependency for arrayAccess lhsm use : RWR
****************************************/
for(i = 0; i <= grid_points[0] - 3; i++) {
i1 = i + 1;
i2 = i + 2;
m = 3;
fac1 = 1.0 / lhsp[j][i][2];
lhsp[j][i][3] = fac1 * lhsp[j][i][3];
lhsp[j][i][4] = fac1 * lhsp[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1] * lhsp[j][i][3];
lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1] * lhsp[j][i][4];
rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1] * rhs[k][j][i][m];
lhsp[j][i2][1] = lhsp[j][i2][1] - lhsp[j][i2][0] * lhsp[j][i][3];
lhsp[j][i2][2] = lhsp[j][i2][2] - lhsp[j][i2][0] * lhsp[j][i][4];
rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsp[j][i2][0] * rhs[k][j][i][m];
m = 4;
fac1 = 1.0 / lhsm[j][i][2];
lhsm[j][i][3] = fac1 * lhsm[j][i][3];
lhsm[j][i][4] = fac1 * lhsm[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1] * lhsm[j][i][3];
lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1] * lhsm[j][i][4];
rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1] * rhs[k][j][i][m];
lhsm[j][i2][1] = lhsm[j][i2][1] - lhsm[j][i2][0] * lhsm[j][i][3];
lhsm[j][i2][2] = lhsm[j][i2][2] - lhsm[j][i2][0] * lhsm[j][i][4];
rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsm[j][i2][0] * rhs[k][j][i][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, i1, m, fac1) firstprivate(ny2, k, grid_points)
for(j = 1; j <= ny2; j++) {
i = grid_points[0] - 2;
i1 = grid_points[0] - 1;
m = 3;
fac1 = 1.0 / lhsp[j][i][2];
lhsp[j][i][3] = fac1 * lhsp[j][i][3];
lhsp[j][i][4] = fac1 * lhsp[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1] * lhsp[j][i][3];
lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1] * lhsp[j][i][4];
rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1] * rhs[k][j][i][m];
m = 4;
fac1 = 1.0 / lhsm[j][i][2];
lhsm[j][i][3] = fac1 * lhsm[j][i][3];
lhsm[j][i][4] = fac1 * lhsm[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1] * lhsm[j][i][3];
lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1] * lhsm[j][i][4];
rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1] * rhs[k][j][i][m];
//---------------------------------------------------------------------
// Scale the last row immediately
//---------------------------------------------------------------------
rhs[k][j][i1][3] = rhs[k][j][i1][3] / lhsp[j][i1][2];
rhs[k][j][i1][4] = rhs[k][j][i1][4] / lhsm[j][i1][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, m, i, i1) firstprivate(ny2, k, grid_points, lhs, lhsp, lhsm)
for(j = 1; j <= ny2; j++) {
i = grid_points[0] - 2;
i1 = grid_points[0] - 1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j][i1][m];
}
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j][i1][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j][i1][4];
}
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i, m, i1, i2) firstprivate(ny2, k, grid_points, lhs, lhsp, lhsm)
for(j = 1; j <= ny2; j++) {
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(i = grid_points[0] - 3; i >= 0; i--) {
i1 = i + 1;
i2 = i + 2;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j][i1][m] - lhs[j][i][4] * rhs[k][j][i2][m];
}
//-------------------------------------------------------------------
// And the remaining two
//-------------------------------------------------------------------
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j][i1][3] - lhsp[j][i][4] * rhs[k][j][i2][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j][i1][4] - lhsm[j][i][4] * rhs[k][j][i2][4];
}
}
}
//---------------------------------------------------------------------
// Do the block-diagonal inversion
//---------------------------------------------------------------------
ninvr();
}
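/*
 * x_solve() above factors each x-line with the Thomas algorithm: a forward
 * sweep that normalizes the pivot and eliminates the entries below it, then
 * back substitution. The scalar tridiagonal version below shows the same two
 * sweeps in their simplest form, assuming a well-conditioned system; a, b, c
 * are the sub-, main and super-diagonals, and the solution overwrites r
 * (thomas_tridiag is an illustrative name, not a benchmark symbol):
 */
static void thomas_tridiag(int n, double *a, double *b, double *c, double *r) {
  int i;
  double fac;
  /* forward elimination, mirroring the "FORWARD ELIMINATION" sweeps above */
  for (i = 0; i < n - 1; i++) {
    fac = 1.0 / b[i];
    c[i] = fac * c[i];
    r[i] = fac * r[i];
    b[i + 1] = b[i + 1] - a[i + 1] * c[i];
    r[i + 1] = r[i + 1] - a[i + 1] * r[i];
  }
  /* scale the last row immediately, as in the benchmark */
  r[n - 1] = r[n - 1] / b[n - 1];
  /* back substitution, mirroring the "BACKSUBSTITUTION" sweep above */
  for (i = n - 2; i >= 0; i--) {
    r[i] = r[i] - c[i] * r[i + 1];
  }
}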
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the y-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the y-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
void y_solve() {
int i, j, k, j1, j2, m;
double ru1, fac1, fac2;
double rhoq[36];
double cv[36];
double lhs[37][37][5];
double lhsp[37][37][5];
double lhsm[37][37][5];
#pragma omp parallel for default(shared) private(k, i, j, m, ru1, j1, j2, fac1, fac2) firstprivate(nx2, ny2, c3c4, con43, c1c5, dy3, dy5, dymax, dy1, dtty2, dtty1, c2dtty1, comz5, comz4, comz1, comz6, grid_points, rho_i, vs, speed, lhs, lhsp, lhsm, cv, rhoq)
for(k = 1; k <= grid_points[2] - 2; k++) {
lhsinitj(ny2 + 1, nx2, lhs, lhsp, lhsm);
//---------------------------------------------------------------------
// Computes the left hand side for the three y-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(i, j, ru1) firstprivate(c3c4, k, con43, c1c5, dy3, dy5, dymax, dy1, dtty2, dtty1, c2dtty1, grid_points, rho_i, vs, cv, rhoq)
for(i = 1; i <= grid_points[0] - 2; i++) {
// #pragma omp parallel for default(shared) private(j, ru1) firstprivate(c3c4, k, i, con43, c1c5, dy3, dy5, dymax, dy1, grid_points, rho_i, vs)
for(j = 0; j <= grid_points[1] - 1; j++) {
ru1 = c3c4 * rho_i[k][j][i];
cv[j] = vs[k][j][i];
/* max of the four eigenvalue bounds; behavior-identical to the generated nested ternaries */
rhoq[j] = fmax(fmax(dy3 + con43 * ru1, dy5 + c1c5 * ru1), fmax(dymax + ru1, dy1));
}
// #pragma omp parallel for default(shared) private(j) firstprivate(i, dtty2, dtty1, c2dtty1, grid_points, cv, rhoq)
for(j = 1; j <= grid_points[1] - 2; j++) {
lhs[j][i][0] = 0.0;
lhs[j][i][1] = -dtty2 * cv[j - 1] - dtty1 * rhoq[j - 1];
lhs[j][i][2] = 1.0 + c2dtty1 * rhoq[j];
lhs[j][i][3] = dtty2 * cv[j + 1] - dtty1 * rhoq[j + 1];
lhs[j][i][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(i, j) firstprivate(comz5, comz4, comz1, comz6, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
j = 1;
lhs[j][i][2] = lhs[j][i][2] + comz5;
lhs[j][i][3] = lhs[j][i][3] - comz4;
lhs[j][i][4] = lhs[j][i][4] + comz1;
lhs[j + 1][i][1] = lhs[j + 1][i][1] - comz4;
lhs[j + 1][i][2] = lhs[j + 1][i][2] + comz6;
lhs[j + 1][i][3] = lhs[j + 1][i][3] - comz4;
lhs[j + 1][i][4] = lhs[j + 1][i][4] + comz1;
}
// #pragma omp parallel for default(shared) private(j, i) firstprivate(comz1, comz4, comz6, grid_points)
for(j = 3; j <= grid_points[1] - 4; j++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(j, comz1, comz4, comz6, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
lhs[j][i][0] = lhs[j][i][0] + comz1;
lhs[j][i][1] = lhs[j][i][1] - comz4;
lhs[j][i][2] = lhs[j][i][2] + comz6;
lhs[j][i][3] = lhs[j][i][3] - comz4;
lhs[j][i][4] = lhs[j][i][4] + comz1;
}
}
// #pragma omp parallel for default(shared) private(i, j) firstprivate(comz1, comz4, comz6, comz5, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
j = grid_points[1] - 3;
lhs[j][i][0] = lhs[j][i][0] + comz1;
lhs[j][i][1] = lhs[j][i][1] - comz4;
lhs[j][i][2] = lhs[j][i][2] + comz6;
lhs[j][i][3] = lhs[j][i][3] - comz4;
lhs[j + 1][i][0] = lhs[j + 1][i][0] + comz1;
lhs[j + 1][i][1] = lhs[j + 1][i][1] - comz4;
lhs[j + 1][i][2] = lhs[j + 1][i][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, do the other two factors
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(j, i) firstprivate(dtty2, k, grid_points, lhs, speed)
for(j = 1; j <= grid_points[1] - 2; j++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(j, dtty2, k, grid_points, lhs, speed)
for(i = 1; i <= grid_points[0] - 2; i++) {
lhsp[j][i][0] = lhs[j][i][0];
lhsp[j][i][1] = lhs[j][i][1] - dtty2 * speed[k][j - 1][i];
lhsp[j][i][2] = lhs[j][i][2];
lhsp[j][i][3] = lhs[j][i][3] + dtty2 * speed[k][j + 1][i];
lhsp[j][i][4] = lhs[j][i][4];
lhsm[j][i][0] = lhs[j][i][0];
lhsm[j][i][1] = lhs[j][i][1] + dtty2 * speed[k][j - 1][i];
lhsm[j][i][2] = lhs[j][i][2];
lhsm[j][i][3] = lhs[j][i][3] - dtty2 * speed[k][j + 1][i];
lhsm[j][i][4] = lhs[j][i][4];
}
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RWR
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(j = 0; j <= grid_points[1] - 3; j++) {
j1 = j + 1;
j2 = j + 2;
// #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(j, k, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
fac1 = 1.0 / lhs[j][i][2];
lhs[j][i][3] = fac1 * lhs[j][i][3];
lhs[j][i][4] = fac1 * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
}
lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1] * lhs[j][i][3];
lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1] * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1] * rhs[k][j][i][m];
}
lhs[j2][i][1] = lhs[j2][i][1] - lhs[j2][i][0] * lhs[j][i][3];
lhs[j2][i][2] = lhs[j2][i][2] - lhs[j2][i][0] * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhs[j2][i][0] * rhs[k][j][i][m];
}
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
j = grid_points[1] - 2;
j1 = grid_points[1] - 1;
// #pragma omp parallel for default(shared) private(i, m, fac1, fac2) firstprivate(j, k, j1, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
fac1 = 1.0 / lhs[j][i][2];
lhs[j][i][3] = fac1 * lhs[j][i][3];
lhs[j][i][4] = fac1 * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
}
lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1] * lhs[j][i][3];
lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1] * lhs[j][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1] * rhs[k][j][i][m];
}
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac2 = 1.0 / lhs[j1][i][2];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j1][i][m] = fac2 * rhs[k][j1][i][m];
}
}
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhsp use : RWR
unsolved dependency for arrayAccess rhs use : RW
unsolved dependency for arrayAccess lhsm use : RWR
****************************************/
for(j = 0; j <= grid_points[1] - 3; j++) {
j1 = j + 1;
j2 = j + 2;
// #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(j, k, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
m = 3;
fac1 = 1.0 / lhsp[j][i][2];
lhsp[j][i][3] = fac1 * lhsp[j][i][3];
lhsp[j][i][4] = fac1 * lhsp[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1] * lhsp[j][i][3];
lhsp[j1][i][3] = lhsp[j1][i][3] - lhsp[j1][i][1] * lhsp[j][i][4];
rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1] * rhs[k][j][i][m];
lhsp[j2][i][1] = lhsp[j2][i][1] - lhsp[j2][i][0] * lhsp[j][i][3];
lhsp[j2][i][2] = lhsp[j2][i][2] - lhsp[j2][i][0] * lhsp[j][i][4];
rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhsp[j2][i][0] * rhs[k][j][i][m];
m = 4;
fac1 = 1.0 / lhsm[j][i][2];
lhsm[j][i][3] = fac1 * lhsm[j][i][3];
lhsm[j][i][4] = fac1 * lhsm[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1] * lhsm[j][i][3];
lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1] * lhsm[j][i][4];
rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1] * rhs[k][j][i][m];
lhsm[j2][i][1] = lhsm[j2][i][1] - lhsm[j2][i][0] * lhsm[j][i][3];
lhsm[j2][i][2] = lhsm[j2][i][2] - lhsm[j2][i][0] * lhsm[j][i][4];
rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhsm[j2][i][0] * rhs[k][j][i][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
j = grid_points[1] - 2;
j1 = grid_points[1] - 1;
// #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(j, k, j1, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
m = 3;
fac1 = 1.0 / lhsp[j][i][2];
lhsp[j][i][3] = fac1 * lhsp[j][i][3];
lhsp[j][i][4] = fac1 * lhsp[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1] * lhsp[j][i][3];
lhsp[j1][i][3] = lhsp[j1][i][3] - lhsp[j1][i][1] * lhsp[j][i][4];
rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1] * rhs[k][j][i][m];
m = 4;
fac1 = 1.0 / lhsm[j][i][2];
lhsm[j][i][3] = fac1 * lhsm[j][i][3];
lhsm[j][i][4] = fac1 * lhsm[j][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1] * lhsm[j][i][3];
lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1] * lhsm[j][i][4];
rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1] * rhs[k][j][i][m];
//---------------------------------------------------------------------
// Scale the last row immediately
//---------------------------------------------------------------------
rhs[k][j1][i][3] = rhs[k][j1][i][3] / lhsp[j1][i][2];
rhs[k][j1][i][4] = rhs[k][j1][i][4] / lhsm[j1][i][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
j = grid_points[1] - 2;
j1 = grid_points[1] - 1;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, j1, grid_points, lhs, lhsp, lhsm)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j1][i][m];
}
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j1][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j1][i][4];
}
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(j = grid_points[1] - 3; j >= 0; j--) {
j1 = j + 1;
j2 = j + 2;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, grid_points, lhs, lhsp, lhsm)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3] * rhs[k][j1][i][m] - lhs[j][i][4] * rhs[k][j2][i][m];
}
//-------------------------------------------------------------------
// And the remaining two
//-------------------------------------------------------------------
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3] * rhs[k][j1][i][3] - lhsp[j][i][4] * rhs[k][j2][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3] * rhs[k][j1][i][4] - lhsm[j][i][4] * rhs[k][j2][i][4];
}
}
}
pinvr();
}
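//---------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the benchmark): the
// forward elimination / backsubstitution above is the Thomas algorithm
// applied line-by-line, extended to eliminate two sub-diagonals because
// the fourth-order dissipation makes each system pentadiagonal. A
// minimal stand-alone tridiagonal version (lower a, diagonal b, upper c,
// right-hand side d, all of length n; the name is hypothetical):
//---------------------------------------------------------------------
static void thomas_tridiag(int n, const double *a, double *b, const double *c, double *d) {
int i;
// forward elimination: cancel the sub-diagonal against the previous pivot row
for(i = 1; i < n; i++) {
double w = a[i] / b[i - 1];
b[i] = b[i] - w * c[i - 1];
d[i] = d[i] - w * d[i - 1];
}
// backsubstitution from the last row upwards
d[n - 1] = d[n - 1] / b[n - 1];
for(i = n - 2; i >= 0; i--) {
d[i] = (d[i] - c[i] * d[i + 1]) / b[i];
}
}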
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the z-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the z-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
void z_solve() {
int i, j, k, k1, k2, m;
double ru1, fac1, fac2;
double rhos[36];
double cv[36];
double lhs[37][37][5];
double lhsp[37][37][5];
double lhsm[37][37][5];
#pragma omp parallel for default(shared) private(j, i, k, m, ru1, k1, k2, fac1, fac2) firstprivate(ny2, nx2, nz2, c3c4, con43, c1c5, dz4, dz5, dzmax, dz1, dttz2, dttz1, c2dttz1, comz5, comz4, comz1, comz6, rho_i, ws, speed, grid_points, lhs, lhsp, lhsm, cv, rhos)
for(j = 1; j <= ny2; j++) {
lhsinitj(nz2 + 1, nx2, lhs, lhsp, lhsm);
//---------------------------------------------------------------------
// Computes the left hand side for the three z-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(i, k, ru1) firstprivate(nx2, nz2, c3c4, j, con43, c1c5, dz4, dz5, dzmax, dz1, dttz2, dttz1, c2dttz1, rho_i, ws, cv, rhos)
for(i = 1; i <= nx2; i++) {
// #pragma omp parallel for default(shared) private(k, ru1) firstprivate(nz2, c3c4, j, i, con43, c1c5, dz4, dz5, dzmax, dz1, rho_i, ws)
for(k = 0; k <= nz2 + 1; k++) {
ru1 = c3c4 * rho_i[k][j][i];
cv[k] = ws[k][j][i];
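/* rhos[k] = max(dz4 + con43*ru1, dz5 + c1c5*ru1, dzmax + ru1, dz1), spelled out as nested ternaries below */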
rhos[k] = ((((dz4 + con43 * ru1) > (dz5 + c1c5 * ru1) ? (dz4 + con43 * ru1) : (dz5 + c1c5 * ru1))) > (((dzmax + ru1) > (dz1) ? (dzmax + ru1) : (dz1))) ? (((dz4 + con43 * ru1) > (dz5 + c1c5 * ru1) ? (dz4 + con43 * ru1) : (dz5 + c1c5 * ru1))) : (((dzmax + ru1) > (dz1) ? (dzmax + ru1) : (dz1))));
}
// #pragma omp parallel for default(shared) private(k) firstprivate(nz2, i, dttz2, dttz1, c2dttz1, cv, rhos)
for(k = 1; k <= nz2; k++) {
lhs[k][i][0] = 0.0;
lhs[k][i][1] = -dttz2 * cv[k - 1] - dttz1 * rhos[k - 1];
lhs[k][i][2] = 1.0 + c2dttz1 * rhos[k];
lhs[k][i][3] = dttz2 * cv[k + 1] - dttz1 * rhos[k + 1];
lhs[k][i][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(i, k) firstprivate(nx2, comz5, comz4, comz1, comz6)
for(i = 1; i <= nx2; i++) {
k = 1;
lhs[k][i][2] = lhs[k][i][2] + comz5;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
k = 2;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
}
// #pragma omp parallel for default(shared) private(k, i) firstprivate(nz2, nx2, comz1, comz4, comz6)
for(k = 3; k <= nz2 - 2; k++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(nx2, k, comz1, comz4, comz6)
for(i = 1; i <= nx2; i++) {
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
}
}
// #pragma omp parallel for default(shared) private(i, k) firstprivate(nx2, nz2, comz1, comz4, comz6, comz5)
for(i = 1; i <= nx2; i++) {
k = nz2 - 1;
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
k = nz2;
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, fill the other factors (u+c), (u-c)
//---------------------------------------------------------------------
// #pragma omp parallel for default(shared) private(k, i) firstprivate(nz2, nx2, dttz2, j, lhs, speed)
for(k = 1; k <= nz2; k++) {
// #pragma omp parallel for default(shared) private(i) firstprivate(nx2, k, dttz2, j, lhs, speed)
for(i = 1; i <= nx2; i++) {
lhsp[k][i][0] = lhs[k][i][0];
lhsp[k][i][1] = lhs[k][i][1] - dttz2 * speed[k - 1][j][i];
lhsp[k][i][2] = lhs[k][i][2];
lhsp[k][i][3] = lhs[k][i][3] + dttz2 * speed[k + 1][j][i];
lhsp[k][i][4] = lhs[k][i][4];
lhsm[k][i][0] = lhs[k][i][0];
lhsm[k][i][1] = lhs[k][i][1] + dttz2 * speed[k - 1][j][i];
lhsm[k][i][2] = lhs[k][i][2];
lhsm[k][i][3] = lhs[k][i][3] - dttz2 * speed[k + 1][j][i];
lhsm[k][i][4] = lhs[k][i][4];
}
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RWR
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(k = 0; k <= grid_points[2] - 3; k++) {
k1 = k + 1;
k2 = k + 2;
// #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(nx2, k, j)
for(i = 1; i <= nx2; i++) {
fac1 = 1.0 / lhs[k][i][2];
lhs[k][i][3] = fac1 * lhs[k][i][3];
lhs[k][i][4] = fac1 * lhs[k][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
}
lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1] * lhs[k][i][3];
lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1] * lhs[k][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1] * rhs[k][j][i][m];
}
lhs[k2][i][1] = lhs[k2][i][1] - lhs[k2][i][0] * lhs[k][i][3];
lhs[k2][i][2] = lhs[k2][i][2] - lhs[k2][i][0] * lhs[k][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhs[k2][i][0] * rhs[k][j][i][m];
}
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
k = grid_points[2] - 2;
k1 = grid_points[2] - 1;
// #pragma omp parallel for default(shared) private(i, m, fac1, fac2) firstprivate(nx2, k, j, k1)
for(i = 1; i <= nx2; i++) {
fac1 = 1.0 / lhs[k][i][2];
lhs[k][i][3] = fac1 * lhs[k][i][3];
lhs[k][i][4] = fac1 * lhs[k][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
}
lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1] * lhs[k][i][3];
lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1] * lhs[k][i][4];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1] * rhs[k][j][i][m];
}
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac2 = 1.0 / lhs[k1][i][2];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k1][j][i][m] = fac2 * rhs[k1][j][i][m];
}
}
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhsp use : RWR
unsolved dependency for arrayAccess rhs use : RW
unsolved dependency for arrayAccess lhsm use : RWR
****************************************/
for(k = 0; k <= grid_points[2] - 3; k++) {
k1 = k + 1;
k2 = k + 2;
// #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(nx2, k, j)
for(i = 1; i <= nx2; i++) {
m = 3;
fac1 = 1.0 / lhsp[k][i][2];
lhsp[k][i][3] = fac1 * lhsp[k][i][3];
lhsp[k][i][4] = fac1 * lhsp[k][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1] * lhsp[k][i][3];
lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1] * lhsp[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1] * rhs[k][j][i][m];
lhsp[k2][i][1] = lhsp[k2][i][1] - lhsp[k2][i][0] * lhsp[k][i][3];
lhsp[k2][i][2] = lhsp[k2][i][2] - lhsp[k2][i][0] * lhsp[k][i][4];
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsp[k2][i][0] * rhs[k][j][i][m];
m = 4;
fac1 = 1.0 / lhsm[k][i][2];
lhsm[k][i][3] = fac1 * lhsm[k][i][3];
lhsm[k][i][4] = fac1 * lhsm[k][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1] * lhsm[k][i][3];
lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1] * lhsm[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1] * rhs[k][j][i][m];
lhsm[k2][i][1] = lhsm[k2][i][1] - lhsm[k2][i][0] * lhsm[k][i][3];
lhsm[k2][i][2] = lhsm[k2][i][2] - lhsm[k2][i][0] * lhsm[k][i][4];
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsm[k2][i][0] * rhs[k][j][i][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
k = grid_points[2] - 2;
k1 = grid_points[2] - 1;
// #pragma omp parallel for default(shared) private(i, m, fac1) firstprivate(nx2, k, j, k1)
for(i = 1; i <= nx2; i++) {
m = 3;
fac1 = 1.0 / lhsp[k][i][2];
lhsp[k][i][3] = fac1 * lhsp[k][i][3];
lhsp[k][i][4] = fac1 * lhsp[k][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1] * lhsp[k][i][3];
lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1] * lhsp[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1] * rhs[k][j][i][m];
m = 4;
fac1 = 1.0 / lhsm[k][i][2];
lhsm[k][i][3] = fac1 * lhsm[k][i][3];
lhsm[k][i][4] = fac1 * lhsm[k][i][4];
rhs[k][j][i][m] = fac1 * rhs[k][j][i][m];
lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1] * lhsm[k][i][3];
lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1] * lhsm[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1] * rhs[k][j][i][m];
//---------------------------------------------------------------------
// Scale the last row immediately (some of this is overkill
// if this is the last cell)
//---------------------------------------------------------------------
rhs[k1][j][i][3] = rhs[k1][j][i][3] / lhsp[k1][i][2];
rhs[k1][j][i][4] = rhs[k1][j][i][4] / lhsm[k1][i][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
k = grid_points[2] - 2;
k1 = grid_points[2] - 1;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, k1, j, lhs, lhsp, lhsm)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3] * rhs[k1][j][i][m];
}
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3] * rhs[k1][j][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3] * rhs[k1][j][i][4];
}
//---------------------------------------------------------------------
// Whether or not this is the last processor, we always have
// to complete the back-substitution
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(k = grid_points[2] - 3; k >= 0; k--) {
k1 = k + 1;
k2 = k + 2;
// #pragma omp parallel for default(shared) private(i, m) firstprivate(nx2, k, j, lhs, lhsp, lhsm)
for(i = 1; i <= nx2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3] * rhs[k1][j][i][m] - lhs[k][i][4] * rhs[k2][j][i][m];
}
//-------------------------------------------------------------------
// And the remaining two
//-------------------------------------------------------------------
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3] * rhs[k1][j][i][3] - lhsp[k][i][4] * rhs[k2][j][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3] * rhs[k1][j][i][4] - lhsm[k][i][4] * rhs[k2][j][i][4];
}
}
}
tzetar();
}
void set_constants() {
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
bt = sqrt(0.5);
dnxm1 = 1.0 / (double) (grid_points[0] - 1);
dnym1 = 1.0 / (double) (grid_points[1] - 1);
dnzm1 = 1.0 / (double) (grid_points[2] - 1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0 - c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = ((dx3) > (dx4) ? (dx3) : (dx4));
dymax = ((dy2) > (dy4) ? (dy2) : (dy4));
dzmax = ((dz2) > (dz3) ? (dz2) : (dz3));
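/* dssp = 0.25 * max(dx1, dy1, dz1), written with nested ternaries below */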
dssp = 0.25 * ((dx1) > (((dy1) > (dz1) ? (dy1) : (dz1))) ? (dx1) : (((dy1) > (dz1) ? (dy1) : (dz1))));
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt * tx1;
dttx2 = dt * tx2;
dtty1 = dt * ty1;
dtty2 = dt * ty2;
dttz1 = dt * tz1;
dttz2 = dt * tz2;
c2dttx1 = 2.0 * dttx1;
c2dtty1 = 2.0 * dtty1;
c2dttz1 = 2.0 * dttz1;
dtdssp = dt * dssp;
comz1 = dtdssp;
comz4 = 4.0 * dtdssp;
comz5 = 5.0 * dtdssp;
comz6 = 6.0 * dtdssp;
c3c4tx3 = c3c4 * tx3;
c3c4ty3 = c3c4 * ty3;
c3c4tz3 = c3c4 * tz3;
dx1tx1 = dx1 * tx1;
dx2tx1 = dx2 * tx1;
dx3tx1 = dx3 * tx1;
dx4tx1 = dx4 * tx1;
dx5tx1 = dx5 * tx1;
dy1ty1 = dy1 * ty1;
dy2ty1 = dy2 * ty1;
dy3ty1 = dy3 * ty1;
dy4ty1 = dy4 * ty1;
dy5ty1 = dy5 * ty1;
dz1tz1 = dz1 * tz1;
dz2tz1 = dz2 * tz1;
dz3tz1 = dz3 * tz1;
dz4tz1 = dz4 * tz1;
dz5tz1 = dz5 * tz1;
c2iv = 2.5;
con43 = 4.0 / 3.0;
con16 = 1.0 / 6.0;
xxcon1 = c3c4tx3 * con43 * tx3;
xxcon2 = c3c4tx3 * tx3;
xxcon3 = c3c4tx3 * conz1 * tx3;
xxcon4 = c3c4tx3 * con16 * tx3;
xxcon5 = c3c4tx3 * c1c5 * tx3;
yycon1 = c3c4ty3 * con43 * ty3;
yycon2 = c3c4ty3 * ty3;
yycon3 = c3c4ty3 * conz1 * ty3;
yycon4 = c3c4ty3 * con16 * ty3;
yycon5 = c3c4ty3 * c1c5 * ty3;
zzcon1 = c3c4tz3 * con43 * tz3;
zzcon2 = c3c4tz3 * tz3;
zzcon3 = c3c4tz3 * conz1 * tz3;
zzcon4 = c3c4tz3 * con16 * tz3;
zzcon5 = c3c4tz3 * c1c5 * tz3;
}
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) {
char size[16];
int j;
printf("\n\n %s Benchmark Completed.\n", name);
printf(" Class = %12c\n", class);
// If this is not a grid-based problem (EP, FT, CG), then
// we only print n1, which contains some measure of the
// problem size. In that case, n2 and n3 are both zero.
// Otherwise, we print the grid size n1xn2xn3
if((n2 == 0) && (n3 == 0)) {
if((name[0] == 'E') && (name[1] == 'P')) {
sprintf(size, "%15.0lf", pow(2.0, n1));
j = 14;
if(size[j] == '.') {
size[j] = ' ';
j--;
}
size[j + 1] = '\0';
printf(" Size = %15s\n", size);
}
else {
printf(" Size = %12d\n", n1);
}
}
else {
printf(" Size = %4dx%4dx%4d\n", n1, n2, n3);
}
printf(" Iterations = %12d\n", niter);
printf(" Time in seconds = %12.4lf\n", t);
printf(" Mop/s total = %15.2lf\n", mops);
printf(" Operation type = %24s\n", optype);
if(verified) printf(" Verification = %12s\n", "SUCCESSFUL");
else printf(" Verification = %12s\n", "UNSUCCESSFUL");
}
void wtime(double *t) {
static int sec = -1;
struct timeval tv;
gettimeofday(&tv, (void *) 0);
if(sec < 0) sec = tv.tv_sec;
*t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time() {
double t;
wtime(&t);
return (t);
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear(int n) {
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start(int n) {
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop(int n) {
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read(int n) {
return (elapsed[n]);
}
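/*****************************************************************/
/****** Editor's note: timer usage sketch ******/
/*****************************************************************/
/* A hedged usage sketch (not part of the benchmark): the slots are
used in clear/start/stop/read order; slot 0 is an arbitrary choice
and the workload is a stand-in. */
static void timer_usage_example() {
volatile double s = 0.0;
int it;
timer_clear(0);
timer_start(0);
for(it = 0; it < 1000000; it++) s += it * 1.0e-6; /* stand-in workload */
timer_stop(0);
printf(" example section: %12.4lf seconds\n", timer_read(0));
}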
|
array_init.c | // Test the handling of two loops under omp for
// watch the loop index replacement (private by default)
int main(void)
{
int i, j;
float u[500][500];
#pragma omp parallel for
for (i=0; i<500; i++)
for (j=0; j<500; j++)
{
u[i][j] = 0.0;
}
return 0;
}
|
GB_unaryop__minv_uint64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_int32
// op(A') function: GB_tran__minv_uint64_int32
// C type: uint64_t
// A type: int32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint64_int32
(
uint64_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
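//------------------------------------------------------------------------------
// Editor's note (illustrative expansion, not generated code): with the macros
// above, the loop body GB_CAST_OP (p, p) expands to
//
//      int32_t aij = Ax [p] ;                  // GB_GETA
//      uint64_t x = (uint64_t) aij ;           // GB_CASTING
//      Cx [p] = GB_IMINV_UNSIGNED (x, 64) ;    // GB_OP into GB_CX (p)
//
// which makes the cast-then-apply structure of C = unop (A) explicit.
//------------------------------------------------------------------------------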
|
pi_atomic.c | /*
* A parallel pi program using atomic operations
*
* Author: Matt Cufari
* Version: 1.0.0
* Date Created Dec 18 2020
* Date Last Modified Jan 4 2021
*/
#include <stdio.h>
#include <omp.h>
static long num_steps = 1000000;
double step;
#define NUM_THREADS 2
int main(){
double pi = 0.0; /* must be initialized: threads atomically accumulate into it */
int nthreads;
step = 1.0/(double) num_steps;
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
{
int i, id, nthrds;
double x, sum;
id = omp_get_thread_num();
nthrds = omp_get_num_threads();
if(id == 0) nthreads = nthrds;
for(i = id, sum=0.0; i < num_steps; i=i+nthrds){ /* stride by the thread's own count, not the shared nthreads */
x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
}
sum = sum*step;
#pragma omp atomic
pi += sum;
}
printf("The value of pi is: %f\n", pi);
}
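/*
 * Editor's note (alternative sketch, not part of the original program): the
 * same integration written with an OpenMP reduction, which removes the manual
 * thread striding and the atomic update. It reuses the globals step and
 * num_steps above; the function name is hypothetical.
 */
double pi_reduction(){
double sum = 0.0;
long i;
step = 1.0/(double) num_steps;
#pragma omp parallel for reduction(+:sum)
for(i = 0; i < num_steps; i++){
double x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
}
return sum*step;
}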
|
polybench.c | /**
* polybench.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
static
double rtclock()
{
#ifdef POLYBENCH_TIME
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, NULL);
if (stat != 0)
printf ("Error return from gettimeofday: %d", stat);
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif
void polybench_flush_cache()
{
int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
double* flush = (double*) calloc (cs, sizeof(double));
int i;
double tmp = 0.0;
#ifdef _OPENMP
#endif
#pragma omp parallel for reduction(+:tmp)
for (i = 0; i < cs; i++)
tmp += flush[i];
assert (tmp <= 10.0);
free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS)
{
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
void polybench_papi_init()
{
# ifdef _OPENMP
{
{
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
int k;
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
# endif
}
void polybench_papi_close()
{
# ifdef _OPENMP
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
# endif
}
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
# endif
return 0;
}
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
# endif
}
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
# endif
}
#endif
/* ! POLYBENCH_PAPI */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
void polybench_timer_start()
{
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
if (polybench_program_total_flops == 0)
{
printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
}
else
printf ("%0.2lf\n",
(polybench_program_total_flops /
(double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
printf ("%Ld\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
static
void *
xmalloc (size_t num)
{
void* nnew = NULL;
int ret = posix_memalign (&nnew, 32, num);
if (! nnew || ret)
{
fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
exit (1);
}
return nnew;
}
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
/// FIXME: detect overflow!
size_t val = n;
val *= elt_size;
void* ret = xmalloc (val);
return ret;
}
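/* Editor's note (hedged usage sketch, not part of PolyBench): a kernel driver
   typically brackets the measured region as follows; kernel_foo and its
   arguments are hypothetical.

       polybench_timer_start ();  // flushes the cache / sets the scheduler first
       kernel_foo (n, A, B);
       polybench_timer_stop ();
       polybench_timer_print ();  // seconds by default; cycles or GFLOPS if enabled
*/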
|
countsort.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : countsort.c
// Create : 2019-06-21 17:15:17
// Revise : 2019-11-09 10:34:42
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
#include "countsort.h"
#include "edgeList.h"
#include "vertex.h"
#include "myMalloc.h"
#include "graphCSR.h"
struct EdgeList *countSortEdgesBySource (struct EdgeList *edgeList)
{
uint32_t key = 0;
uint32_t pos = 0;
uint32_t num_vertices = edgeList->num_vertices;
uint32_t num_edges = edgeList->num_edges;
uint32_t i = 0;
uint32_t j = 0;
uint32_t P = 1; // number of OpenMP threads (set by thread 0 inside the parallel region)
uint32_t t_id = 0;
uint32_t offset_start = 0;
uint32_t offset_end = 0;
uint32_t base = 0;
uint32_t *vertex_count = NULL;
struct EdgeList *sorted_edges_array = newEdgeList(num_edges);
#pragma omp parallel default(none) shared(P,vertex_count,sorted_edges_array,edgeList,num_edges,num_vertices) firstprivate(t_id, offset_end,offset_start,base,i,j,key,pos)
{
t_id = omp_get_thread_num();
if(t_id == 0)
{
P = omp_get_num_threads();
vertex_count = (uint32_t *) my_malloc(P * num_vertices * sizeof(uint32_t));
}
#pragma omp barrier
offset_start = t_id * (num_edges / P);
if(t_id == (P - 1))
{
offset_end = offset_start + (num_edges / P) + (num_edges % P) ;
}
else
{
offset_end = offset_start + (num_edges / P);
}
//HISTOGRAM-KEYS
for(i = 0; i < num_vertices; i++)
{
vertex_count[(t_id * num_vertices) + i] = 0;
}
// count occurrence of key: id of the source vertex
for(i = offset_start; i < offset_end; i++)
{
key = edgeList->edges_array_src[i];
vertex_count[(t_id * num_vertices) + key]++;
}
#pragma omp barrier
//SCAN BUCKETS
if(t_id == 0)
{
for(i = 0; i < num_vertices; i++)
{
for(j = 0 ; j < P; j++)
{
pos = vertex_count[(j * num_vertices) + i];
vertex_count[(j * num_vertices) + i] = base;
base += pos;
}
}
}
#pragma omp barrier
//RANK-AND-PERMUTE
for(i = offset_start; i < offset_end; i++)
{
key = edgeList->edges_array_src[i];
pos = vertex_count[(t_id * num_vertices) + key];
sorted_edges_array->edges_array_dest[pos] = edgeList->edges_array_dest[i];
sorted_edges_array->edges_array_src[pos] = edgeList->edges_array_src[i];
#if WEIGHTED
sorted_edges_array->edges_array_weight[pos] = edgeList->edges_array_weight[i];
#endif
vertex_count[(t_id * num_vertices) + key]++;
}
}
#pragma omp parallel for
for(i = 0; i < num_edges; i++)
{
edgeList->edges_array_dest[i] = sorted_edges_array->edges_array_dest[i];
edgeList->edges_array_src[i] = sorted_edges_array->edges_array_src[i];
#if WEIGHTED
edgeList->edges_array_weight[i] = sorted_edges_array->edges_array_weight[i] ;
#endif
}
free(vertex_count);
freeEdgeList(sorted_edges_array);
return edgeList;
}
struct EdgeList *countSortEdgesByDestination (struct EdgeList *edgeList)
{
uint32_t key = 0;
uint32_t pos = 0;
uint32_t num_vertices = edgeList->num_vertices;
uint32_t num_edges = edgeList->num_edges;
uint32_t i = 0;
uint32_t j = 0;
uint32_t P = 1; // number of OpenMP threads (set by thread 0 inside the parallel region)
uint32_t t_id = 0;
uint32_t offset_start = 0;
uint32_t offset_end = 0;
uint32_t base = 0;
uint32_t *vertex_count = NULL;
struct EdgeList *sorted_edges_array = newEdgeList(num_edges);
#pragma omp parallel default(none) shared(P,vertex_count,sorted_edges_array,edgeList,num_edges,num_vertices) firstprivate(t_id, offset_end,offset_start,base,i,j,key,pos)
{
t_id = omp_get_thread_num();
if(t_id == 0)
{
P = omp_get_num_threads();
vertex_count = (uint32_t *) my_malloc( P * num_vertices * sizeof(uint32_t));
}
#pragma omp barrier
offset_start = t_id * (num_edges / P);
if(t_id == (P - 1))
{
offset_end = offset_start + (num_edges / P) + (num_edges % P) ;
}
else
{
offset_end = offset_start + (num_edges / P);
}
//HISTOGRAM-KEYS
for(i = 0; i < num_vertices; i++)
{
vertex_count[(t_id * num_vertices) + i] = 0;
}
// count occurrence of key: id of the destination vertex
for(i = offset_start; i < offset_end; i++)
{
key = edgeList->edges_array_dest[i];
vertex_count[(t_id * num_vertices) + key]++;
}
#pragma omp barrier
//SCAN BUCKETS
if(t_id == 0)
{
for(i = 0; i < num_vertices; i++)
{
for(j = 0 ; j < P; j++)
{
pos = vertex_count[(j * num_vertices) + i];
vertex_count[(j * num_vertices) + i] = base;
base += pos;
}
}
}
#pragma omp barrier
//RANK-AND-PERMUTE
for(i = offset_start; i < offset_end; i++)
{
key = edgeList->edges_array_dest[i];
pos = vertex_count[(t_id * num_vertices) + key];
sorted_edges_array->edges_array_dest[pos] = edgeList->edges_array_dest[i];
sorted_edges_array->edges_array_src[pos] = edgeList->edges_array_src[i];
#if WEIGHTED
sorted_edges_array->edges_array_weight[pos] = edgeList->edges_array_weight[i];
#endif
vertex_count[(t_id * num_vertices) + key]++;
}
}
#pragma omp parallel for
for(i = 0; i < num_edges; i++)
{
edgeList->edges_array_dest[i] = sorted_edges_array->edges_array_dest[i];
edgeList->edges_array_src[i] = sorted_edges_array->edges_array_src[i];
#if WEIGHTED
edgeList->edges_array_weight[i] = sorted_edges_array->edges_array_weight[i] ;
#endif
}
free(vertex_count);
freeEdgeList(sorted_edges_array);
return edgeList;
}
struct EdgeList *countSortEdgesBySourceAndDestination (struct EdgeList *edgeList)
{
edgeList = countSortEdgesByDestination (edgeList);
edgeList = countSortEdgesBySource (edgeList);
return edgeList;
}
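// -----------------------------------------------------------------------------
// Editor's note (minimal sketch, not part of AccelGraph): the serial skeleton
// of the histogram / prefix-scan / permute pattern used above, for a plain
// array of keys in [0, num_keys). The parallel versions above add one private
// histogram per thread and scan the per-thread histograms key-major, so each
// thread receives a stable output slice for every key value.
// -----------------------------------------------------------------------------
static void countsort_keys(const uint32_t *in, uint32_t *out, uint32_t n, uint32_t num_keys)
{
uint32_t i;
uint32_t base = 0;
uint32_t *count = (uint32_t *) calloc(num_keys, sizeof(uint32_t));
for(i = 0; i < n; i++) // histogram the keys
{
count[in[i]]++;
}
for(i = 0; i < num_keys; i++) // exclusive prefix scan
{
uint32_t c = count[i];
count[i] = base;
base += c;
}
for(i = 0; i < n; i++) // stable permute
{
out[count[in[i]]++] = in[i];
}
free(count);
}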
|
row_major_kernels.h | #ifndef ROW_MAJOR_KERNELS_H
#define ROW_MAJOR_KERNELS_H
extern "C" {
/*!
* Row-Major matrix sketching with CountSketch and Gaussian transforms. Parallelized with OpenMP.
* A is a n*d row-major matrix. S is a r*n CountSketch. C is a row major matrix.
* If m == 0: C has size r * d and we only apply C <- S * A (G is not applied).
* If m > 0: C has size m * d. In this case C <- G * S * A * (1/sqrt(m))
* G is a m*r matrix with elements from the standard normal distribution.
*
* @param d the number of columns of A and C.
* @param m the number of rows of C and G.
* @param n the number of rows of A and columns of S.
* @param r the number of rows of S and columns of G.
* @param A pointer to the array storing matrix A in row-major format.
* @param C pointer to the array storing matrix C in row-major format.
*/
void rmcgs( const int d, const int m, const int n, const int r, double *const A, double *const C ) {
CountSketch S( r, n, std::thread::hardware_concurrency() );
S.populate();
if ( m == 0 ) {
S.apply_row_major( d, 1, A, 0, C, 0, r );
} else {
set_value( m, d, C, 0 );
const int block_size = d;
const int n_blocks = static_cast<int>( std::ceil( static_cast<double>( r ) / static_cast<double>( block_size ) ) );
double *_G = new double[m * block_size];
double *_T = new double[block_size * d];
set_value( block_size, d, _T, 0 );
for ( int i = 0; i < n_blocks - 1; ++i ) {
set_randn( m, block_size, _G );
S.apply_row_major( d, 1, A, 0, _T, i * block_size, ( i + 1 ) *block_size );
gemm( m, d, block_size, 1, _G, _T, 1, C );
}
set_randn( m, block_size, _G );
S.apply_row_major( d, 1, A, 0, _T, ( n_blocks - 1 ) * block_size, r );
gemm( m, d, ( r - ( n_blocks - 1 ) *block_size ), 1, _G, _T, 1, C );
double scale_factor = static_cast<double>( 1 ) / sqrt( static_cast<double>( m ) );
scale( m, d, C, scale_factor );
delete[] _T;
delete[] _G;
}
}
/*!
* Computes the squared row norms of the matrix ( A * B ) and stores them in the vector x.
* Parallelized with OpenMP. A and B are dense in row-major format.
*
* @param m the number of rows of A.
* @param n the number of columns of B.
* @param k the number of columns of A and rows of B.
* @param alpha scalar to multiply ( A * B ).
* @param A pointer to the array storing matrix A in row-major format.
* @param beta scalar to multiply the vector x.
* @param B pointer to the array storing matrix B in row-major format.
* @param x pointer to the array storing vector x.
*/
void rmsqn( const int m, const int n, int k, const double alpha, double *const A, const double beta,
double *const B, double *const x ) {
if ( beta != 1 ) {
scale( m, 1, x, beta );
}
if ( alpha == static_cast<double>( 0 ) ) {
return;
}
#pragma omp parallel
{
int i, j, ind, start_row, end_row;
double A_ij, x_i;
double *_C = new double[n];
double *_B, *_A;
int thread_id = omp_get_thread_num();
int n_threads = omp_get_num_threads();
int block_size = static_cast<int>( std::ceil( static_cast<double>( m ) / static_cast<double>( n_threads ) ) );
start_row = block_size * thread_id;
end_row = block_size * ( thread_id + 1 );
end_row = std::min( end_row, m );
for ( i = start_row; i < end_row; ++i ) {
x_i = 0;
_B = & ( B[0] );
_A = &( A[i * k] );
A_ij = _A[0];
#pragma omp simd
for ( j = 0; j < n; ++j ) {
_C[j] = ( A_ij * _B[j] );
}
for ( ind = 1; ind < k; ++ind ) {
A_ij = _A[ind];
_B = & ( B[ind * n] );
#pragma omp simd
for ( j = 0; j < n; ++j ) {
_C[j] += ( A_ij * _B[j] );
}
}
for ( j = 0; j < n; ++j ) {
x_i += _C[j] * _C[j];
}
x[i] += alpha * x_i;
}
delete[] _C;
}
}
/*!
* Computes: B <- B * D in parallel using OpenMP. D is a diagonal matrix
* stored as a vector (DIA format) and B is dense in row-major format.
*
* @param m the number of rows of B.
* @param n the number of columns of B and D.
* @param D pointer to the array storing the diagonal matrix D.
* @param B pointer to the array storing matrix B in row-major format.
*/
void rmdsc( const int m, const int n, double *const D, double *const B ) {
double *_B, * _D;
int steps;
#pragma omp parallel for private(_B, steps)
for ( int i = 0; i < n; i += 8 ) {
_D = & ( D[i] );
steps = std::min( 8, n - i );
for ( int j = 0; j < m; ++j ) {
_B = & ( B[i + j * n] );
#pragma omp simd
for ( int k = 0; k < steps; ++k ) {
_B[k] *= _D[k];
}
}
}
}
}
#endif
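/*!
 * Editor's note (hedged usage sketch, not part of the library): scaling the
 * columns of a row-major matrix with rmdsc; sizes and values are illustrative.
 *
 *     double B[4 * 8];        // 4x8 dense, row-major, filled elsewhere
 *     double D[8];            // diagonal stored as a vector
 *     rmdsc( 4, 8, D, B );    // B <- B * D: column j of B is scaled by D[j]
 */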
|
embedded_fehlberg_7_8.c | ////////////////////////////////////////////////////////////////////////////////
// File: embedded_fehlberg_7_8.c //
// Routines: //
// Embedded_Fehlberg_7_8 //
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// //
// Description: //
// The Runge-Kutta-Fehlberg method is an adaptive procedure for approxi- //
// mating the solution of the differential equation y'(x) = f(x,y) with //
// initial condition y(x0) = c. This implementation evaluates f(x,y) //
// thirteen times per step using embedded seventh and eighth order         //
// Runge-Kutta estimates to estimate not only the solution but also        //
// the error. //
// The next step size is then calculated using the preassigned tolerance //
// and error estimate. //
// For step i+1, //
// y[i+1] = y[i] + h * (41/840 * k1 + 34/105 * finavalu_temp[5] + 9/35 * finavalu_temp[6] //
// + 9/35 * finavalu_temp[7] + 9/280 * finavalu_temp[8] + 9/280 finavalu_temp[9] + 41/840 finavalu_temp[10] ) //
// where //
// k1 = f( x[i],y[i] ), //
// finavalu_temp[1] = f( x[i]+2h/27, y[i] + 2h*k1/27), //
// finavalu_temp[2] = f( x[i]+h/9, y[i]+h/36*( k1 + 3 finavalu_temp[1]) ), //
// finavalu_temp[3] = f( x[i]+h/6, y[i]+h/24*( k1 + 3 finavalu_temp[2]) ), //
// finavalu_temp[4] = f( x[i]+5h/12, y[i]+h/48*(20 k1 - 75 finavalu_temp[2] + 75 finavalu_temp[3])), //
// finavalu_temp[5] = f( x[i]+h/2, y[i]+h/20*( k1 + 5 finavalu_temp[3] + 4 finavalu_temp[4] ) ), //
// finavalu_temp[6] = f( x[i]+5h/6, y[i]+h/108*( -25 k1 + 125 finavalu_temp[3] - 260 finavalu_temp[4] + 250 finavalu_temp[5] ) ), //
// finavalu_temp[7] = f( x[i]+h/6, y[i]+h*( 31/300 k1 + 61/225 finavalu_temp[4] - 2/9 finavalu_temp[5] //
// + 13/900 finavalu_temp[6]) ) //
// finavalu_temp[8] = f( x[i]+2h/3, y[i]+h*( 2 k1 - 53/6 finavalu_temp[3] + 704/45 finavalu_temp[4] - 107/9 finavalu_temp[5] //
// + 67/90 finavalu_temp[6] + 3 finavalu_temp[7]) ), //
// finavalu_temp[9] = f( x[i]+h/3, y[i]+h*( -91/108 k1 + 23/108 finavalu_temp[3] - 976/135 finavalu_temp[4] //
// + 311/54 finavalu_temp[5] - 19/60 finavalu_temp[6] + 17/6 finavalu_temp[7] - 1/12 finavalu_temp[8]) ), //
// finavalu_temp[10] = f( x[i]+h, y[i]+h*( 2383/4100 k1 - 341/164 finavalu_temp[3] + 4496/1025 finavalu_temp[4] //
// - 301/82 finavalu_temp[5] + 2133/4100 finavalu_temp[6] + 45/82 finavalu_temp[7] + 45/164 finavalu_temp[8] + 18/41 finavalu_temp[9]) ) //
// finavalu_temp[11] = f( x[i], y[i]+h*( 3/205 k1 - 6/41 finavalu_temp[5] - 3/205 finavalu_temp[6] - 3/41 finavalu_temp[7] //
// + 3/41 finavalu_temp[8] + 6/41 finavalu_temp[9]) ) //
// finavalu_temp[12] = f( x[i]+h, y[i]+h*( -1777/4100 k1 - 341/164 finavalu_temp[3] + 4496/1025 finavalu_temp[4] //
// - 289/82 finavalu_temp[5] + 2193/4100 finavalu_temp[6] + 51/82 finavalu_temp[7] + 33/164 finavalu_temp[8] + //
// 12/41 finavalu_temp[9] + finavalu_temp[11]) ) //
// x[i+1] = x[i] + h. //
// //
// The error is estimated to be //
// err = -41/840 * h * ( k1 + finavalu_temp[10] - finavalu_temp[11] - finavalu_temp[12]) //
// The step size h is then scaled by the scale factor //
// scale = 0.8 * | epsilon * y[i] / [err * (xmax - x[0])] | ^ 1/7 //
// The scale factor is further constrained 0.125 < scale < 4.0. //
// The new step size is h := scale * h. //
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// static fp Runge_Kutta(fp (*f)(fp,fp), fp *y, fp x0, fp h) //
// //
// Description: //
// This routine uses Fehlberg's embedded 7th and 8th order methods to //
// approximate the solution of the differential equation y'=f(x,y) with //
// the initial condition y = y[0] at x = x0. The value at x + h is //
// returned in y[1]. The function returns err / h ( the absolute error //
// per step size ). //
// //
// Arguments: //
// fp *f Pointer to the function which returns the slope at (x,y) of //
// integral curve of the differential equation y' = f(x,y) //
// which passes through the point (x0,y[0]). //
// fp y[] On input y[0] is the initial value of y at x, on output //
// y[1] is the solution at x + h. //
// fp x Initial value of x. //
// fp h Step size //
// //
// Return Values: //
// This routine returns the err / h. The solution of y(x) at x + h is //
// returned in y[1]. //
// //
////////////////////////////////////////////////////////////////////////////////
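////////////////////////////////////////////////////////////////////////////////
// Editor's note (illustrative sketch, not part of the solver): the step-size //
// control described above, written out directly; the names follow the       //
// comment and the function name is hypothetical.                            //
//                                                                            //
//   static fp next_step_size(fp h, fp epsilon, fp y, fp err, fp x0, fp xmax)//
//   {                                                                        //
//     fp scale = 0.8 * pow(fabs(epsilon*y / (err*(xmax - x0))), 1.0/7.0);   //
//     if (scale < 0.125) scale = 0.125;  // constrain 0.125 < scale < 4.0   //
//     if (scale > 4.0)   scale = 4.0;                                       //
//     return scale * h;                  // h := scale * h                  //
//   }                                                                        //
////////////////////////////////////////////////////////////////////////////////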
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
#include "master.c" // (in directory)
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <math.h> // (in path provided to compiler) needed by pow, fabs
//======================================================================================================================================================150
// END
//======================================================================================================================================================150
//========================================================================================================================================================================================================200
// PARTICULAR SOLVER FUNCTION
//========================================================================================================================================================================================================200
void
embedded_fehlberg_7_8( fp timeinst,
fp h,
fp *initvalu,
fp *finavalu,
fp *params,
fp *com,
fp *error,
long long *timecopyin,
long long *timecopykernel,
long long *timecopyout)
{
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
static const fp c_1_11 = 41.0 / 840.0;
static const fp c6 = 34.0 / 105.0;
static const fp c_7_8= 9.0 / 35.0;
static const fp c_9_10 = 9.0 / 280.0;
static const fp a2 = 2.0 / 27.0;
static const fp a3 = 1.0 / 9.0;
static const fp a4 = 1.0 / 6.0;
static const fp a5 = 5.0 / 12.0;
static const fp a6 = 1.0 / 2.0;
static const fp a7 = 5.0 / 6.0;
static const fp a8 = 1.0 / 6.0;
static const fp a9 = 2.0 / 3.0;
static const fp a10 = 1.0 / 3.0;
static const fp b31 = 1.0 / 36.0;
static const fp b32 = 3.0 / 36.0;
static const fp b41 = 1.0 / 24.0;
static const fp b43 = 3.0 / 24.0;
static const fp b51 = 20.0 / 48.0;
static const fp b53 = -75.0 / 48.0;
static const fp b54 = 75.0 / 48.0;
static const fp b61 = 1.0 / 20.0;
static const fp b64 = 5.0 / 20.0;
static const fp b65 = 4.0 / 20.0;
static const fp b71 = -25.0 / 108.0;
static const fp b74 = 125.0 / 108.0;
static const fp b75 = -260.0 / 108.0;
static const fp b76 = 250.0 / 108.0;
static const fp b81 = 31.0/300.0;
static const fp b85 = 61.0/225.0;
static const fp b86 = -2.0/9.0;
static const fp b87 = 13.0/900.0;
static const fp b91 = 2.0;
static const fp b94 = -53.0/6.0;
static const fp b95 = 704.0 / 45.0;
static const fp b96 = -107.0 / 9.0;
static const fp b97 = 67.0 / 90.0;
static const fp b98 = 3.0;
static const fp b10_1 = -91.0 / 108.0;
static const fp b10_4 = 23.0 / 108.0;
static const fp b10_5 = -976.0 / 135.0;
static const fp b10_6 = 311.0 / 54.0;
static const fp b10_7 = -19.0 / 60.0;
static const fp b10_8 = 17.0 / 6.0;
static const fp b10_9 = -1.0 / 12.0;
static const fp b11_1 = 2383.0 / 4100.0;
static const fp b11_4 = -341.0 / 164.0;
static const fp b11_5 = 4496.0 / 1025.0;
static const fp b11_6 = -301.0 / 82.0;
static const fp b11_7 = 2133.0 / 4100.0;
static const fp b11_8 = 45.0 / 82.0;
static const fp b11_9 = 45.0 / 164.0;
static const fp b11_10 = 18.0 / 41.0;
static const fp b12_1 = 3.0 / 205.0;
static const fp b12_6 = -6.0 / 41.0;
static const fp b12_7 = -3.0 / 205.0;
static const fp b12_8 = -3.0 / 41.0;
static const fp b12_9 = 3.0 / 41.0;
static const fp b12_10 = 6.0 / 41.0;
static const fp b13_1 = -1777.0 / 4100.0;
static const fp b13_4 = -341.0 / 164.0;
static const fp b13_5 = 4496.0 / 1025.0;
static const fp b13_6 = -289.0 / 82.0;
static const fp b13_7 = 2193.0 / 4100.0;
static const fp b13_8 = 51.0 / 82.0;
static const fp b13_9 = 33.0 / 164.0;
static const fp b13_10 = 12.0 / 41.0;
static const fp err_factor = -41.0 / 840.0;
fp h2_7 = a2 * h;
fp timeinst_temp;
fp* initvalu_temp;
fp** finavalu_temp;
int i;
//======================================================================================================================================================
// TEMPORARY STORAGE ALLOCATION
//======================================================================================================================================================
initvalu_temp= (fp *) malloc(EQUATIONS* sizeof(fp));
finavalu_temp= (fp **) malloc(13* sizeof(fp *));
for (i= 0; i<13; i++){
finavalu_temp[i]= (fp *) malloc(EQUATIONS* sizeof(fp));
}
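// finavalu_temp[s] will hold the derivative evaluation of stage s+1; the
// Fehlberg 7(8) pair uses 13 such stages per step.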
//======================================================================================================================================================
// EVALUATIONS [UNROLLED LOOP] [SEQUENTIAL DEPENDENCY]
//======================================================================================================================================================
//===================================================================================================
// 1
//===================================================================================================
// save the init values
timeinst_temp = timeinst;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] ;
//printf("initvalu[%d] = %f\n", i, initvalu[i]);
}
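// initvalu is overwritten in place before every stage below, so the step's
// starting state is preserved in initvalu_temp.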
#pragma omp target data map (alloc: initvalu[0:EQUATIONS], finavalu[0:EQUATIONS])
{
#ifdef DEBUG
printf("master 1\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[0],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[0], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 2
//===================================================================================================
timeinst_temp = timeinst+h2_7;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h2_7 * (finavalu_temp[0][i]);
initvalu[i] = initvalu_temp[i] + h2_7 * (finavalu_temp[0][i]);
}
#ifdef DEBUG
printf("master 2\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[1],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[1], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 3
//===================================================================================================
timeinst_temp = timeinst+a3*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b31*finavalu_temp[0][i] + b32*finavalu_temp[1][i]);
initvalu[i] = initvalu_temp[i] + h * ( b31*finavalu_temp[0][i] + b32*finavalu_temp[1][i]);
}
#ifdef DEBUG
printf("master 3\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[2],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[2], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 4
//===================================================================================================
timeinst_temp = timeinst+a4*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b41*finavalu_temp[0][i] + b43*finavalu_temp[2][i]) ;
initvalu[i] = initvalu_temp[i] + h * ( b41*finavalu_temp[0][i] + b43*finavalu_temp[2][i]) ;
}
#ifdef DEBUG
printf("master 4\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[3],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[3], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 5
//===================================================================================================
timeinst_temp = timeinst+a5*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b51*finavalu_temp[0][i] + b53*finavalu_temp[2][i] + b54*finavalu_temp[3][i]) ;
initvalu[i] = initvalu_temp[i] + h * ( b51*finavalu_temp[0][i] + b53*finavalu_temp[2][i] + b54*finavalu_temp[3][i]) ;
}
#ifdef DEBUG
printf("master 5\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[4],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[4], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 6
//===================================================================================================
timeinst_temp = timeinst+a6*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b61*finavalu_temp[0][i] + b64*finavalu_temp[3][i] + b65*finavalu_temp[4][i]) ;
initvalu[i] = initvalu_temp[i] + h * ( b61*finavalu_temp[0][i] + b64*finavalu_temp[3][i] + b65*finavalu_temp[4][i]) ;
}
#ifdef DEBUG
printf("master 6\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[5],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[5], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 7
//===================================================================================================
timeinst_temp = timeinst+a7*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b71*finavalu_temp[0][i] + b74*finavalu_temp[3][i] + b75*finavalu_temp[4][i] + b76*finavalu_temp[5][i]);
initvalu[i] = initvalu_temp[i] + h * ( b71*finavalu_temp[0][i] + b74*finavalu_temp[3][i] + b75*finavalu_temp[4][i] + b76*finavalu_temp[5][i]) ;
}
#ifdef DEBUG
printf("master 7\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[6],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[6], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 8
//===================================================================================================
timeinst_temp = timeinst+a8*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b81*finavalu_temp[0][i] + b85*finavalu_temp[4][i] + b86*finavalu_temp[5][i] + b87*finavalu_temp[6][i]);
initvalu[i] = initvalu_temp[i] + h * ( b81*finavalu_temp[0][i] + b85*finavalu_temp[4][i] + b86*finavalu_temp[5][i] + b87*finavalu_temp[6][i]);
}
#ifdef DEBUG
printf("master 8\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[7],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[7], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 9
//===================================================================================================
timeinst_temp = timeinst+a9*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b91*finavalu_temp[0][i] + b94*finavalu_temp[3][i] + b95*finavalu_temp[4][i] + b96*finavalu_temp[5][i] + b97*finavalu_temp[6][i]+ b98*finavalu_temp[7][i]) ;
initvalu[i] = initvalu_temp[i] + h * ( b91*finavalu_temp[0][i] + b94*finavalu_temp[3][i] + b95*finavalu_temp[4][i] + b96*finavalu_temp[5][i] + b97*finavalu_temp[6][i]+ b98*finavalu_temp[7][i]) ;
}
#ifdef DEBUG
printf("master 9\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[8],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[8], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 10
//===================================================================================================
timeinst_temp = timeinst+a10*h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b10_1*finavalu_temp[0][i] + b10_4*finavalu_temp[3][i] + b10_5*finavalu_temp[4][i] + b10_6*finavalu_temp[5][i] + b10_7*finavalu_temp[6][i] + b10_8*finavalu_temp[7][i] + b10_9*finavalu_temp[8][i]) ;
initvalu[i] = initvalu_temp[i] + h * ( b10_1*finavalu_temp[0][i] + b10_4*finavalu_temp[3][i] + b10_5*finavalu_temp[4][i] + b10_6*finavalu_temp[5][i] + b10_7*finavalu_temp[6][i] + b10_8*finavalu_temp[7][i] + b10_9*finavalu_temp[8][i]) ;
}
#ifdef DEBUG
printf("master 10\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[9],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[9], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 11
//===================================================================================================
timeinst_temp = timeinst+h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b11_1*finavalu_temp[0][i] + b11_4*finavalu_temp[3][i] + b11_5*finavalu_temp[4][i] + b11_6*finavalu_temp[5][i] + b11_7*finavalu_temp[6][i] + b11_8*finavalu_temp[7][i] + b11_9*finavalu_temp[8][i]+ b11_10 * finavalu_temp[9][i]);
initvalu[i] = initvalu_temp[i] + h * ( b11_1*finavalu_temp[0][i] + b11_4*finavalu_temp[3][i] + b11_5*finavalu_temp[4][i] + b11_6*finavalu_temp[5][i] + b11_7*finavalu_temp[6][i] + b11_8*finavalu_temp[7][i] + b11_9*finavalu_temp[8][i]+ b11_10 * finavalu_temp[9][i]);
}
#ifdef DEBUG
printf("master 11\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[10],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[10], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 12
//===================================================================================================
timeinst_temp = timeinst;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b12_1*finavalu_temp[0][i] + b12_6*finavalu_temp[5][i] + b12_7*finavalu_temp[6][i] + b12_8*finavalu_temp[7][i] + b12_9*finavalu_temp[8][i] + b12_10 * finavalu_temp[9][i]) ;
initvalu[i] = initvalu_temp[i] + h * ( b12_1*finavalu_temp[0][i] + b12_6*finavalu_temp[5][i] + b12_7*finavalu_temp[6][i] + b12_8*finavalu_temp[7][i] + b12_9*finavalu_temp[8][i] + b12_10 * finavalu_temp[9][i]) ;
}
#ifdef DEBUG
printf("master 12\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[11],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[11], finavalu, sizeof(fp)*EQUATIONS);
//===================================================================================================
// 13
//===================================================================================================
timeinst_temp = timeinst+h;
for(i=0; i<EQUATIONS; i++){
//initvalu_temp[i] = initvalu[i] + h * ( b13_1*finavalu_temp[0][i] + b13_4*finavalu_temp[3][i] + b13_5*finavalu_temp[4][i] + b13_6*finavalu_temp[5][i] + b13_7*finavalu_temp[6][i] + b13_8*finavalu_temp[7][i] + b13_9*finavalu_temp[8][i] + b13_10*finavalu_temp[9][i] + finavalu_temp[11][i]) ;
initvalu[i] = initvalu_temp[i] + h * ( b13_1*finavalu_temp[0][i] + b13_4*finavalu_temp[3][i] + b13_5*finavalu_temp[4][i] + b13_6*finavalu_temp[5][i] + b13_7*finavalu_temp[6][i] + b13_8*finavalu_temp[7][i] + b13_9*finavalu_temp[8][i] + b13_10*finavalu_temp[9][i] + finavalu_temp[11][i]) ;
}
#ifdef DEBUG
printf("master 13\n");
#endif
master( timeinst_temp,
//initvalu_temp,
initvalu,
params,
//finavalu_temp[12],
finavalu,
com,
timecopyin,
timecopykernel,
timecopyout);
memcpy(finavalu_temp[12], finavalu, sizeof(fp)*EQUATIONS);
} // end of #pragma omp target data region
//======================================================================================================================================================
// FINAL VALUE
//======================================================================================================================================================
for(i=0; i<EQUATIONS; i++){
//finavalu[i]= initvalu[i] + h * (c_1_11 * (finavalu_temp[0][i] + finavalu_temp[10][i]) + c6 * finavalu_temp[5][i] + c_7_8 * (finavalu_temp[6][i] + finavalu_temp[7][i]) + c_9_10 * (finavalu_temp[8][i] + finavalu_temp[9][i]) );
finavalu[i]= initvalu_temp[i] + h * (c_1_11 * (finavalu_temp[0][i] + finavalu_temp[10][i]) + c6 * finavalu_temp[5][i] + c_7_8 * (finavalu_temp[6][i] + finavalu_temp[7][i]) + c_9_10 * (finavalu_temp[8][i] + finavalu_temp[9][i]) );
}
//======================================================================================================================================================
// RETURN
//======================================================================================================================================================
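// Embedded 7(8) error estimate, computed component-wise from the saved stage
// derivatives: error[i] = |err_factor * (k1 + k11 - k12 - k13)|.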
for(i=0; i<EQUATIONS; i++){
error[i] = fabs(err_factor * (finavalu_temp[0][i] + finavalu_temp[10][i] - finavalu_temp[11][i] - finavalu_temp[12][i]));
}
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
free(initvalu_temp);
// free each stage buffer before the array of pointers (avoids a leak)
for (i=0; i<13; i++){
free(finavalu_temp[i]);
}
free(finavalu_temp);
}
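// Hypothetical driver sketch (not part of the original benchmark): take one
// step with the solver above and halve h when the largest component error
// exceeds a tolerance. Assumes fp and EQUATIONS come from master.c; every name
// introduced here (try_step, tol, ...) is illustrative only. Note the solver
// overwrites y during its stages; the step result is returned in ynew.
static fp try_step(fp t, fp *h, fp tol, fp *y, fp *ynew, fp *params, fp *com,
fp *err, long long *tin, long long *tker, long long *tout)
{
fp emax = 0.0;
int i;
embedded_fehlberg_7_8(t, *h, y, ynew, params, com, err, tin, tker, tout);
for (i = 0; i < EQUATIONS; i++){
if (err[i] > emax) emax = err[i]; // err[i] >= 0: fabs above
}
if (emax > tol) *h *= 0.5; // caller should repeat the step with the smaller h
return emax;
}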
|
reduction.c | #include <stdio.h>
#define N 1000000ll
#define SUM (N * (N-1)/2)
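// closed form of 0 + 1 + ... + (N-1); the ll suffix on N keeps this in 64 bits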
int main (void)
{
long long a, i;
#pragma omp target parallel map(tofrom: a) shared(a) private(i)
{
#pragma omp master
a = 0;
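// 'omp master' has no implied barrier: the explicit barrier below makes
// a == 0 visible to all threads before the reduction loop starts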
#pragma omp barrier
#pragma omp for reduction(+:a)
for (i = 0; i < N; i++) {
a += i;
}
// the result should be the sum 0 + 1 + ... + (N-1) = N*(N-1)/2
#pragma omp single
{
if (a != SUM)
printf ("Incorrect result on target = %lld, expected = %lld!\n", a, SUM);
else
printf ("The result is correct on target = %lld!\n", a);
}
}
if (a != SUM){
printf("Fail!\n");
return 1;
}
printf("Success!\n");
return 0;
}
|
work.c | #include "work.h"
#include <stdio.h>
#include <inttypes.h>
#include <omp.h>   /* omp_get_thread_num / omp_get_num_threads used below */
#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
#include <sched.h> /* legacy sched_setaffinity call below; may also need _GNU_SOURCE */
#endif
/**
* Copy:
* for all threads:
* for (j=0;j<repeats)
* for (i=offset;i<size+offset)
* alla[thread_nr][i]=allb[thread_nr][i]
* resulting in size*repeats*2*sizeof(double) accessed bytes
**/
double copy_(double **alla, double **allb, unsigned long long size, int offset, long long repeats, int localAlloc, int pinThreads)
{
/* stores the measured time */
double time=0.0;
/* start parallel work */
#pragma omp parallel
{
/* used for pinning threads */
long long mask;
/* used for getting correct data */
int num,i,k;
unsigned long long min,max;
double *a;
double *b;
num=omp_get_thread_num();
if (localAlloc){
a = alla[num];
b = allb[num];
min=offset;
max=size+offset;
}else{
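/* shared-array mode: each thread takes its own contiguous 1/num_threads
 * slice of the single arrays alla[0] and allb[0] */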
a = alla[0];
b = allb[0];
min=((omp_get_thread_num()*size)/omp_get_num_threads())+offset;
max=min+size/omp_get_num_threads()+offset-1;
}
#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
if(pinThreads){
/* pin to correct core */
mask=1LL<<num; /* 1LL avoids 32-bit shift overflow for num >= 31 */
sched_setaffinity(0,sizeof(long long),&mask);
/* done pinning to correct core */
}
#endif
/* take start time */
#pragma omp barrier
if (num==0)
time=bi_gettime();
#pragma omp barrier
/* repeat measurement for accuracy */
for (k=0;k<repeats;k++)
/* enable aligned access (may increase performance on x86 systems) */
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
#pragma vector aligned
#endif
/* enable nontemporal stores (may increase performance on x86 systems) */
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
#pragma vector nontemporal (a)
#endif
for (i=min;i<max;i++)
{
a[i]=b[i];
}
#pragma omp barrier
/* take end time */
if (num==0)
time=bi_gettime()-time;
}
return time;
}
double sum_(double **alla, double *result, unsigned long long size, int offset, long long repeats, int localAlloc, int pinThreads)
{
double time=0.0;
double sum=0.0; /* must start at 0: the reduction adds partial sums into it */
#pragma omp parallel reduction(+:sum)
{
long long mask;
double *a;
int num,i,k;
unsigned long long min,max;
num=omp_get_thread_num();
if (localAlloc){
a = alla[num];
min=offset;
max=size+offset;
}else{
a = alla[0];
min=((omp_get_thread_num()*size)/omp_get_num_threads())+offset;
max=min+size/omp_get_num_threads()+offset-1;
}
#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
if(pinThreads){
/* pin to correct core */
mask=1LL<<num;
sched_setaffinity(0,sizeof(long long),&mask);
/* done pinning to correct core */
}
#endif
#pragma omp barrier
if (num==0)
time=bi_gettime();
#pragma omp barrier
for (k=0;k<repeats;k++)
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
#pragma vector aligned
#endif
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
#pragma vector nontemporal (a)
#endif
for (i=min;i<max;i++)
{
sum+=a[i];
}
#pragma omp barrier
if (num==0)
time=bi_gettime()-time;
}
result[0]=sum;
return time;
}
double fill_(double **alla, double scalar, unsigned long long size, int offset, long long repeats, int localAlloc, int pinThreads)
{
double time=0.0;
#pragma omp parallel
{
long long mask;
double *a;
int num,i,k;
unsigned long long min,max;
num=omp_get_thread_num();
if (localAlloc){
a = alla[num];
min=offset;
max=size+offset;
}else{
a = alla[0];
min=((omp_get_thread_num()*size)/omp_get_num_threads())+offset;
max=min+size/omp_get_num_threads()+offset-1;
}
#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
if(pinThreads){
/* pin to correct core */
mask=1LL<<num;
sched_setaffinity(0,sizeof(long long),&mask);
/* done pinning to correct core */
}
#endif
#pragma omp barrier
if (num==0)
time=bi_gettime();
#pragma omp barrier
for (k=0;k<repeats;k++)
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
#pragma vector aligned
#endif
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
#pragma vector nontemporal (a)
#endif
for (i=min;i<max;i++)
{
a[i]=scalar;
}
#pragma omp barrier
if (num==0)
time=bi_gettime()-time;
}
return time;
}
double daxpy_(double **alla, double **allb, double scalar, unsigned long long size, int offset, long long repeats, int localAlloc, int pinThreads)
{
double time=0.0;
#pragma omp parallel
{
long long mask;
int num,i,k;
unsigned long long min,max;
double *a;
double *b;
num=omp_get_thread_num();
if (localAlloc){
a = alla[num];
b = allb[num];
min=offset;
max=size+offset;
}else{
a = alla[0];
b = allb[0];
min=((omp_get_thread_num()*size)/omp_get_num_threads())+offset;
max=min+size/omp_get_num_threads()+offset-1;
}
#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
if(pinThreads){
/* pin to correct core */
mask=1LL<<num;
sched_setaffinity(0,sizeof(long long),&mask);
/* done pinning to correct core */
}
#endif
#pragma omp barrier
if (num==0)
time=bi_gettime();
#pragma omp barrier
for (k=0;k<repeats;k++)
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
#pragma vector aligned
#endif
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
#pragma vector nontemporal (a)
#endif
for (i=min;i<max;i++)
{
a[i]=a[i]*scalar+b[i];
}
#pragma omp barrier
if (num==0)
time=bi_gettime()-time;
}
return time;
}
|
Graph.h | #ifndef BasicGraph
#define BasicGraph
/*
* Graph.h:
* manage nodes in a neural network model
*
* Created on: Apr 21, 2017
* Author: mszhang
*/
//#include "Eigen/Dense"
#include "Node.h"
#include "MyLib.h"
//using namespace Eigen;
// one Node represents a vector
// the number of columns should be 1, since this library targets NLP only
class Graph {
protected:
vector<PExecute> execs; //backward
vector<PNode> nodes; //forward
vector<PNode> free_nodes;
vector<PNode> finish_nodes;
vector<PNode> all_nodes;
public:
bool train;
public:
Graph() {
execs.clear();
nodes.clear();
free_nodes.clear();
}
virtual ~Graph() {
int count = execs.size();
for (int idx = 0; idx < count; idx++) {
delete execs[idx];
}
execs.clear();
nodes.clear();
free_nodes.clear();
}
public:
inline void clearValue(const bool& bTrain = false) {
int count = execs.size();
for (int idx = 0; idx < count; idx++) {
delete execs[idx];
}
execs.clear();
count = nodes.size();
vector<LDG::PTensor> vec_val, vec_loss;
for (int idx = 0; idx < count; idx++) {
nodes[idx]->clearValue();
if(nodes[idx]->node_type != "bucket"){
vec_val.push_back(&nodes[idx]->val);
}
vec_loss.push_back(&nodes[idx]->loss);
//if(nodes[idx]->drop_value > 0)
//vec_drop_mask.push_back(&nodes[idx]->drop_mask);
}
DEV->set(vec_val, 0);
if(bTrain)
DEV->set(vec_loss, 0);
//DEV->set(vec_drop_mask, 1);
nodes.clear();
free_nodes.clear();
finish_nodes.clear();
all_nodes.clear();
train = bTrain;
}
inline void backward() {
int count = execs.size();
for (int idx = count - 1; idx >= 0; idx--) {
execs[idx]->backward();
}
}
inline void addNode(PNode x) {
nodes.push_back(x);
if (x->degree == 0) {
free_nodes.push_back(x);
}
all_nodes.push_back(x);
}
// real execution
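// Kahn-style topological execution: repeatedly batch the currently ready
// (zero-in-degree) nodes into compatible Execute objects, run each batch,
// then decrement parents' degrees to uncover the next wave of ready nodes.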
inline void compute() {
int free_count = free_nodes.size();
while (free_count > 0) {
vector<PExecute> cur_execs;
int cur_execs_size = 0;
for (int idx = 0; idx < free_count; idx++) {
bool find = false;
for (int idy = 0; idy < cur_execs_size; idy++) {
if (cur_execs[idy]->addNode(free_nodes[idx])) {
find = true;
break;
}
}
if (!find) {
PExecute new_exec = free_nodes[idx]->generate(train);
cur_execs.push_back(new_exec);
cur_execs_size++;
}
}
//execute
//#pragma omp parallel for
for (int idy = 0; idy < cur_execs_size; idy++) {
cur_execs[idy]->forward();
}
for (int idy = 0; idy < cur_execs_size; idy++) {
execs.push_back(cur_execs[idy]);
}
//finished nodes
vector<PNode> new_free_nodes;
for (int idx = 0; idx < free_count; idx++) {
finish_nodes.push_back(free_nodes[idx]);
int parent_count = free_nodes[idx]->parents.size();
for (int idy = 0; idy < parent_count; idy++) {
free_nodes[idx]->parents[idy]->degree--;
if (free_nodes[idx]->parents[idy]->degree == 0) {
new_free_nodes.push_back(free_nodes[idx]->parents[idy]);
}
}
}
// update free nodes
free_nodes.clear();
free_count = new_free_nodes.size();
for (int idx = 0; idx < free_count; idx++) {
free_nodes.push_back(new_free_nodes[idx]);
}
}
if (finish_nodes.size() != all_nodes.size()) {
std::cout << "error: several nodes are not executed, finished: " << finish_nodes.size() << ", all: " << all_nodes.size() << std::endl;
int total_node_num = all_nodes.size();
int unprocessed = 0;
for (int idx = 0; idx < total_node_num; idx++) {
PNode curNode = all_nodes[idx];
if (curNode->degree >= 0) {
curNode->typeEqual(all_nodes[0]);
unprocessed++;
}
}
std::cout << "unprocessed: " << unprocessed << std::endl;
}
}
};
// one very useful function to collect pointers of derived nodes
template<typename DerivedNode>
inline vector<PNode> getPNodes(vector<DerivedNode>& inputs, int size) {
int usedSize = inputs.size();
if (size >= 0 && size < usedSize) usedSize = size;
vector<PNode> pnodes;
for (int idx = 0; idx < usedSize; idx++) {
pnodes.push_back(&(inputs[idx]));
}
return pnodes;
}
template<typename DerivedNode>
inline vector<PNode> getPNodes(DerivedNode inputs[], int size) {
//int usedSize = inputs.;
//if (size >= 0 && size < usedSize) usedSize = size;
int usedSize = size;
vector<PNode> pnodes;
for (int idx = 0; idx < usedSize; idx++) {
pnodes.push_back(&(inputs[idx]));
}
return pnodes;
}
template<typename DerivedNode>
inline vector<PNode> getPNodes(vector<DerivedNode>& inputs, int start, int length) {
int end, tmp_end = start + length;
if (tmp_end > inputs.size())
end = inputs.size();
else
end = tmp_end;
//if (size >= 0 && size < usedSize) usedSize = size;
vector<PNode> pnodes;
for (int idx = start; idx < end; idx++) {
pnodes.push_back(&(inputs[idx]));
}
return pnodes;
}
template<typename DerivedNode>
inline vector<PNode> getPNodes(DerivedNode inputs[], int size, int start, int length) {
int end, tmp_end = start + length;
if (tmp_end > size)
end = size;
else
end = tmp_end;
//if (size >= 0 && size < usedSize) usedSize = size;
vector<PNode> pnodes;
for (int idx = start; idx < end; idx++) {
pnodes.push_back(&(inputs[idx]));
}
return pnodes;
}
#endif
|
trsm_x_csr_u_hi_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
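/* Unit-diagonal upper-triangular solve with multiple right-hand sides,
 * row-major CSR: for each RHS column, sweep rows bottom-up, accumulate the
 * strictly-upper contributions already solved, then y[r] = alpha*x[r] - temp
 * (the "u" in the kernel name: the diagonal is taken as 1, so no division).
 * RHS columns are independent, which the parallel-for exploits. */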
ALPHA_INT m = A->rows;
int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT r = m - 1; r >= 0; r--)
{
ALPHA_Number temp;
alpha_setzero(temp);
for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
{
ALPHA_INT ac = A->col_indx[ai];
if (ac > r)
{
alpha_madde(temp, A->values[ai], y[ac * ldy + out_y_col]);
}
}
ALPHA_Number t;
alpha_setzero(t);
alpha_mul(t, alpha, x[r * ldx + out_y_col]);
alpha_sub(y[r * ldy + out_y_col], t, temp);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|