source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
activations.c | #include "activations.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
/**
 * Map an ACTIVATION enum value to its canonical configuration-file name.
 *
 * Inverse of get_activation(). Cases were added for swish, mish, relu6 and
 * the normalize_channels variants, which get_activation() parses but which
 * previously fell through to the default and were misreported as "relu".
 *
 * @param a activation type
 * @return pointer to a static string literal; "relu" for unrecognized values
 */
char *get_activation_string(ACTIVATION a)
{
    switch(a){
        case LOGISTIC:
            return "logistic";
        case SWISH:
            return "swish";
        case MISH:
            return "mish";
        case NORM_CHAN:
            return "normalize_channels";
        case NORM_CHAN_SOFTMAX:
            return "normalize_channels_softmax";
        case NORM_CHAN_SOFTMAX_MAXVAL:
            return "normalize_channels_softmax_maxval";
        case LOGGY:
            return "loggy";
        case RELU:
            return "relu";
        case RELU6:
            return "relu6";
        case ELU:
            return "elu";
        case SELU:
            return "selu";
        case GELU:
            return "gelu";
        case RELIE:
            return "relie";
        case RAMP:
            return "ramp";
        case LINEAR:
            return "linear";
        case TANH:
            return "tanh";
        case PLSE:
            return "plse";
        case LEAKY:
            return "leaky";
        case STAIR:
            return "stair";
        case HARDTAN:
            return "hardtan";
        case LHTAN:
            return "lhtan";
        default:
            break;
    }
    return "relu";
}
/**
 * Parse an activation name (as written in a configuration file) into its
 * ACTIVATION enum value.
 *
 * @param s NUL-terminated activation name, e.g. "leaky"
 * @return matching ACTIVATION; unknown names fall back to RELU after
 *         printing a warning to stderr
 */
ACTIVATION get_activation(char *s)
{
    // Table-driven lookup; names are unique so scan order does not matter.
    static const struct { const char *name; ACTIVATION act; } lookup[] = {
        {"logistic", LOGISTIC},
        {"swish", SWISH},
        {"mish", MISH},
        {"normalize_channels", NORM_CHAN},
        {"normalize_channels_softmax", NORM_CHAN_SOFTMAX},
        {"normalize_channels_softmax_maxval", NORM_CHAN_SOFTMAX_MAXVAL},
        {"loggy", LOGGY},
        {"relu", RELU},
        {"relu6", RELU6},
        {"elu", ELU},
        {"selu", SELU},
        {"gelu", GELU},
        {"relie", RELIE},
        {"plse", PLSE},
        {"hardtan", HARDTAN},
        {"lhtan", LHTAN},
        {"linear", LINEAR},
        {"ramp", RAMP},
        {"leaky", LEAKY},
        {"tanh", TANH},
        {"stair", STAIR},
    };
    size_t idx;
    for (idx = 0; idx < sizeof(lookup) / sizeof(lookup[0]); ++idx) {
        if (strcmp(s, lookup[idx].name) == 0) return lookup[idx].act;
    }
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}
/**
 * Apply a scalar activation function.
 *
 * RELU6 was added: get_activation() can return RELU6 and gradient()
 * handles relu6_gradient, but this dispatch previously lacked the case and
 * silently returned 0 for RELU6.
 *
 * SWISH, MISH and the NORM_CHAN variants are intentionally absent here: they
 * need extra per-element or per-channel state and are handled by the
 * dedicated activate_array_* functions below.
 *
 * @param x input value
 * @param a activation type
 * @return activated value; 0 for activations not handled element-wise
 */
float activate(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_activate(x);
        case LOGISTIC:
            return logistic_activate(x);
        case LOGGY:
            return loggy_activate(x);
        case RELU:
            return relu_activate(x);
        case RELU6:
            return relu6_activate(x);
        case ELU:
            return elu_activate(x);
        case SELU:
            return selu_activate(x);
        case GELU:
            return gelu_activate(x);
        case RELIE:
            return relie_activate(x);
        case RAMP:
            return ramp_activate(x);
        case LEAKY:
            return leaky_activate(x);
        case TANH:
            return tanh_activate(x);
        case PLSE:
            return plse_activate(x);
        case STAIR:
            return stair_activate(x);
        case HARDTAN:
            return hardtan_activate(x);
        case LHTAN:
            return lhtan_activate(x);
        default:
            break;
    }
    return 0;
}
/**
 * Apply an activation in place over an array.
 *
 * LINEAR is a no-op; the hot LEAKY and LOGISTIC paths get dedicated
 * OpenMP-parallel loops that avoid the per-element dispatch of activate().
 *
 * @param x array modified in place
 * @param n number of elements
 * @param a activation type
 */
void activate_array(float *x, const int n, const ACTIVATION a)
{
    int idx;
    switch (a) {
        case LINEAR:
            // identity activation: nothing to do
            break;
        case LEAKY:
#pragma omp parallel for
            for (idx = 0; idx < n; ++idx) {
                x[idx] = leaky_activate(x[idx]);
            }
            break;
        case LOGISTIC:
#pragma omp parallel for
            for (idx = 0; idx < n; ++idx) {
                x[idx] = logistic_activate(x[idx]);
            }
            break;
        default:
            // generic (serial) fallback through the scalar dispatcher
            for (idx = 0; idx < n; ++idx) {
                x[idx] = activate(x[idx], a);
            }
            break;
    }
}
/**
 * Swish activation: output = x * sigmoid(x).
 *
 * The sigmoid values are stored separately because the backward pass
 * (gradient_array_swish) needs them.
 *
 * @param x input array (not modified)
 * @param n number of elements
 * @param output_sigmoid out: sigmoid(x[i]) per element
 * @param output out: swish activation per element
 */
void activate_array_swish(float *x, const int n, float * output_sigmoid, float * output)
{
    int idx;
#pragma omp parallel for
    for (idx = 0; idx < n; ++idx) {
        const float in = x[idx];
        const float sig = logistic_activate(in);
        output_sigmoid[idx] = sig;
        output[idx] = in * sig;
    }
}
// https://github.com/digantamisra98/Mish
/**
 * Mish activation: output = x * tanh(softplus(x)).
 *
 * The pre-activation values are saved because the backward pass
 * (gradient_array_mish) needs the original inputs.
 *
 * @param x input array (not modified)
 * @param n number of elements
 * @param activation_input out: copy of x for the backward pass
 * @param output out: mish activation per element
 */
void activate_array_mish(float *x, const int n, float * activation_input, float * output)
{
    // Threshold beyond which softplus is treated as linear to avoid overflow.
    const float MISH_THRESHOLD = 20;
    int idx;
#pragma omp parallel for
    for (idx = 0; idx < n; ++idx) {
        const float in = x[idx];
        activation_input[idx] = in;  // store value before activation
        output[idx] = in * tanh_activate( softplus_activate(in, MISH_THRESHOLD) );
    }
}
/**
 * Channel normalization: for every spatial position, divide each positive
 * channel value by the sum of the positive channel values at that position
 * (negative values become 0). eps keeps the divisor nonzero.
 *
 * The redundant `if (i < size)` inside the loop (always true given the loop
 * condition) was removed.
 *
 * @param x input, layout [batch][channels][wh_step] flattened
 * @param n total number of elements in x
 * @param batch unused; batch index is derived from i (kept for API compat)
 * @param channels number of channels
 * @param wh_step spatial size (width*height) per channel
 * @param output out: normalized values, same layout as x
 */
void activate_array_normalize_channels(float *x, const int n, int batch, int channels, int wh_step, float *output)
{
    (void)batch;
    const int size = n / channels;  // number of (batch, spatial) positions
    int i;
#pragma omp parallel for
    for (i = 0; i < size; ++i) {
        const float eps = 0.0001;
        const int wh_i = i % wh_step;
        const int b = i / wh_step;
        float sum = eps;
        int k;
        // accumulate positive channel values at this position
        for (k = 0; k < channels; ++k) {
            const float val = x[wh_i + k * wh_step + b*wh_step*channels];
            if (val > 0) sum += val;
        }
        // ReLU then normalize by the positive sum
        for (k = 0; k < channels; ++k) {
            const float val = x[wh_i + k * wh_step + b*wh_step*channels];
            output[wh_i + k * wh_step + b*wh_step*channels] = (val > 0) ? (val / sum) : 0;
        }
    }
}
void activate_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *output, int use_max_val)
{
int size = n / channels;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
int wh_i = i % wh_step;
int b = i / wh_step;
const float eps = 0.0001;
if (i < size) {
float sum = eps;
float max_val = -FLT_MAX;
int k;
if (use_max_val) {
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
if (val > max_val || k == 0) max_val = val;
}
}
else
max_val = 0;
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
sum += expf(val - max_val);
}
for (k = 0; k < channels; ++k) {
float val = x[wh_i + k * wh_step + b*wh_step*channels];
val = expf(val - max_val) / sum;
output[wh_i + k * wh_step + b*wh_step*channels] = val;
}
}
}
}
/**
 * Backward pass for the channel softmax: at every spatial position, scale
 * each channel's delta by the dot product of outputs and deltas across the
 * channel dimension.
 *
 * @param x forward-pass outputs, layout [batch][channels][wh_step] flattened
 * @param n total number of elements
 * @param batch unused; derived from the position index
 * @param channels number of channels
 * @param wh_step spatial size (width*height) per channel
 * @param delta gradients, scaled in place
 */
void gradient_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *delta)
{
    int size = n / channels;
    int i;
#pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;
        if (i < size) {
            int k;
            // dot product of outputs and incoming deltas over channels
            float grad = 0;
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                grad += x[index] * delta[index];
            }
            // rescale every channel's delta by that dot product
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                delta[index] *= grad;
            }
        }
    }
}
/**
 * Backward pass for the ReLU-style channel normalization: at every spatial
 * position, compute the dot product of outputs and deltas over channels and
 * scale the deltas of positive-output channels by it (others are untouched).
 *
 * @param x forward-pass outputs, layout [batch][channels][wh_step] flattened
 * @param n total number of elements
 * @param batch unused; derived from the position index
 * @param channels number of channels
 * @param wh_step spatial size (width*height) per channel
 * @param delta gradients, scaled in place where x > 0
 */
void gradient_array_normalize_channels(float *x, const int n, int batch, int channels, int wh_step, float *delta)
{
    int size = n / channels;
    int i;
#pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;
        if (i < size) {
            int k;
            // dot product of outputs and incoming deltas over channels
            float grad = 0;
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                grad += x[index] * delta[index];
            }
            // only channels that passed the forward ReLU carry gradient
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                if (x[index] > 0) {
                    delta[index] *= grad;
                }
            }
        }
    }
}
/**
 * Derivative of an activation function, evaluated from the forward-pass
 * value x (most *_gradient helpers expect the activation output, matching
 * how gradient_array() is fed — see callers for the exact convention).
 *
 * The NORM_CHAN* cases are fatal here because those gradients need
 * whole-channel context; use gradient_array_normalize_channels*() instead.
 * Fixed: the error message now goes to stderr and the process exits with a
 * failure status (previously printf + exit(0), i.e. a success code).
 *
 * @param x forward-pass value
 * @param a activation type
 * @return derivative; 0 for activations not handled element-wise
 */
float gradient(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_gradient(x);
        case LOGISTIC:
            return logistic_gradient(x);
        case LOGGY:
            return loggy_gradient(x);
        case RELU:
            return relu_gradient(x);
        case RELU6:
            return relu6_gradient(x);
        case NORM_CHAN:
            /* fallthrough */
        case NORM_CHAN_SOFTMAX_MAXVAL:
            /* fallthrough */
        case NORM_CHAN_SOFTMAX:
            fprintf(stderr, " Error: should be used custom NORM_CHAN or NORM_CHAN_SOFTMAX-function for gradient \n");
            exit(EXIT_FAILURE);
        case ELU:
            return elu_gradient(x);
        case SELU:
            return selu_gradient(x);
        case GELU:
            return gelu_gradient(x);
        case RELIE:
            return relie_gradient(x);
        case RAMP:
            return ramp_gradient(x);
        case LEAKY:
            return leaky_gradient(x);
        case TANH:
            return tanh_gradient(x);
        case PLSE:
            return plse_gradient(x);
        case STAIR:
            return stair_gradient(x);
        case HARDTAN:
            return hardtan_gradient(x);
        case LHTAN:
            return lhtan_gradient(x);
        default:
            break;
    }
    return 0;
}
/**
 * Backpropagate through an element-wise activation:
 * delta[i] *= f'(x[i]), where x holds the forward-pass values.
 *
 * @param x forward-pass values (not modified)
 * @param n number of elements
 * @param a activation type
 * @param delta gradients, scaled in place
 */
void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
{
    int idx;
#pragma omp parallel for
    for (idx = 0; idx < n; ++idx) {
        delta[idx] *= gradient(x[idx], a);
    }
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cpp#L54-L56
/**
 * Backward pass for swish: d/dx swish(x) = swish(x) + sigmoid(x)*(1 - swish(x)).
 *
 * @param x forward-pass swish *outputs* (x*sigmoid(x)), stored by
 *          activate_array_swish
 * @param n number of elements
 * @param sigmoid sigmoid(x) values stored by the forward pass
 * @param delta gradients, scaled in place
 */
void gradient_array_swish(const float *x, const int n, const float * sigmoid, float * delta)
{
    int idx;
#pragma omp parallel for
    for (idx = 0; idx < n; ++idx) {
        const float swish_out = x[idx];
        delta[idx] *= swish_out + sigmoid[idx] * (1.0f - swish_out);
    }
}
// https://github.com/digantamisra98/Mish
/**
 * Backward pass for mish: scales delta[i] by d/dx (x * tanh(softplus(x)))
 * evaluated at the stored pre-activation inputs.
 *
 * Note: exp()/tanh() are the double-precision variants on purpose here;
 * changing them to expf()/tanhf() would slightly alter results.
 *
 * @param n number of elements
 * @param activation_input pre-activation inputs saved by activate_array_mish
 * @param delta gradients, scaled in place
 */
void gradient_array_mish(const int n, const float * activation_input, float * delta)
{
int i;
#pragma omp parallel for
for (i = 0; i < n; ++i) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/commit/093cdfa85d334cbe19a37624c33198f3140109ed
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
float inp = activation_input[i];
const float sp = softplus_activate(inp, MISH_THRESHOLD);
// grad of softplus: sigmoid(inp) expressed via 1 - exp(-softplus(inp))
const float grad_sp = 1 - exp(-sp);
const float tsp = tanh(sp);
// chain rule: d tanh(sp)/d inp
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
// product rule on inp * tanh(sp)
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
}
|
CBasedTraversal.h | /**
* @file CBasedTraversal.h
* @author C. Menges
* @date 26.04.2019
*/
#pragma once
#include "autopas/containers/cellPairTraversals/CellPairTraversal.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/DataLayoutConverter.h"
#include "autopas/utils/ThreeDimensionalMapping.h"
namespace autopas {
/**
* This class provides the base for traversals using base steps based on cell coloring.
*
* @tparam ParticleCell the type of cells
* @tparam PairwiseFunctor The functor that defines the interaction of two particles.
* @tparam dataLayout
* @tparam useNewton3
* @tparam collapseDepth Set the depth of loop collapsion for OpenMP. Loop variables from outer to inner loop: z,y,x
*/
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
          int collapseDepth = 3>
class CBasedTraversal : public CellPairTraversal<ParticleCell> {
 protected:
  /**
   * Constructor of the CBasedTraversal.
   * @param dims The dimensions of the cellblock, i.e. the number of cells in x,
   * y and z direction.
   * @param pairwiseFunctor The functor that defines the interaction of two particles.
   * @param interactionLength Interaction length (cutoff + skin).
   * @param cellLength cell length.
   */
  explicit CBasedTraversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
                           const double interactionLength, const std::array<double, 3> &cellLength)
      : CellPairTraversal<ParticleCell>(dims),
        _interactionLength(interactionLength),
        _cellLength(cellLength),
        _dataLayoutConverter(pairwiseFunctor) {
    // Cells-per-dimension that can still contain interaction partners;
    // ceil so partially covered neighbor cells are included.
    for (unsigned int d = 0; d < 3; d++) {
      _overlap[d] = std::ceil(_interactionLength / _cellLength[d]);
    }
  }

  /**
   * Destructor of CBasedTraversal.
   */
  ~CBasedTraversal() override = default;

 public:
  /**
   * load Data Layouts required for this Traversal if cells have been set through setCellsToTraverse().
   */
  void initTraversal() override {
    if (this->_cells) {
      auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
      /// @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
      for (size_t i = 0; i < cells.size(); ++i) {
        _dataLayoutConverter.loadDataLayout(cells[i]);
      }
    }
  }

  /**
   * write Data to AoS if cells have been set through setCellsToTraverse().
   */
  void endTraversal() override {
    if (this->_cells) {
      auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
      /// @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
      for (size_t i = 0; i < cells.size(); ++i) {
        _dataLayoutConverter.storeDataLayout(cells[i]);
      }
    }
  }

 protected:
  /**
   * The main traversal of the CTraversal.
   * @tparam LoopBody type of the loop body
   * @param loopBody The body of the loop as a function. Normally a lambda function, that takes as as parameters
   * (x,y,z). If you need additional input from outside, please use captures (by reference).
   * @param end 3D index until interactions are processed (exclusive).
   * @param stride Distance (in cells) to the next cell of the same color.
   * @param offset initial offset (in cells) in which cell to start the traversal.
   */
  template <typename LoopBody>
  inline void cTraversal(LoopBody &&loopBody, const std::array<unsigned long, 3> &end,
                         const std::array<unsigned long, 3> &stride,
                         const std::array<unsigned long, 3> &offset = {0ul, 0ul, 0ul});

  /**
   * This method is called when the color during the traversal has changed.
   *
   * @param newColor The new current color.
   */
  virtual void notifyColorChange(unsigned long newColor){};

  /**
   * Interaction length (cutoff + skin).
   */
  const double _interactionLength;

  /**
   * cell length in CellBlock3D.
   */
  const std::array<double, 3> _cellLength;

  /**
   * overlap of interacting cells. Array allows asymmetric cell sizes.
   */
  std::array<unsigned long, 3> _overlap;

 private:
  /**
   * Data Layout Converter to be used with this traversal
   */
  utils::DataLayoutConverter<PairwiseFunctor, dataLayout> _dataLayoutConverter;
};
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
          int collapseDepth>
template <typename LoopBody>
inline void CBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, collapseDepth>::cTraversal(
    LoopBody &&loopBody, const std::array<unsigned long, 3> &end, const std::array<unsigned long, 3> &stride,
    const std::array<unsigned long, 3> &offset) {
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel
#endif
  {
    // One color per unique offset inside a stride block; cells of the same
    // color are (by construction of stride) far enough apart to be processed
    // concurrently without write conflicts.
    const unsigned long numColors = stride[0] * stride[1] * stride[2];
    for (unsigned long col = 0; col < numColors; ++col) {
#if defined(AUTOPAS_OPENMP)
#pragma omp single
#endif
      {
        // barrier at omp for of previous loop iteration, so fine to change it for everyone!
        notifyColorChange(col);
        // implicit barrier at end of function.
      }
      // Map the linear color index to a 3D start offset, then shift by the
      // caller-supplied offset.
      std::array<unsigned long, 3> startWithoutOffset(utils::ThreeDimensionalMapping::oneToThreeD(col, stride));
      std::array<unsigned long, 3> start(utils::ArrayMath::add(startWithoutOffset, offset));
      // intel compiler demands following:
      const unsigned long start_x = start[0], start_y = start[1], start_z = start[2];
      const unsigned long end_x = end[0], end_y = end[1], end_z = end[2];
      const unsigned long stride_x = stride[0], stride_y = stride[1], stride_z = stride[2];
      // collapseDepth is a compile-time template parameter, so the unused
      // branch is discarded; both branches run the same loop nest and differ
      // only in the OpenMP collapse clause.
      if (collapseDepth == 2) {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(2)
#endif
        for (unsigned long z = start_z; z < end_z; z += stride_z) {
          for (unsigned long y = start_y; y < end_y; y += stride_y) {
            for (unsigned long x = start_x; x < end_x; x += stride_x) {
              // Don't exchange order of execution (x must be last!), it would break other code
              loopBody(x, y, z);
            }
          }
        }
      } else {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(3)
#endif
        for (unsigned long z = start_z; z < end_z; z += stride_z) {
          for (unsigned long y = start_y; y < end_y; y += stride_y) {
            for (unsigned long x = start_x; x < end_x; x += stride_x) {
              // Don't exchange order of execution (x must be last!), it would break other code
              loopBody(x, y, z);
            }
          }
        }
      }
    }
  }
}
} // namespace autopas
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Returns 1 if the difference is negative, otherwise 0.
 *
 * NOTE: intentionally modifies *y while normalizing (classic GNU libc
 * manual idiom) — callers must not rely on y afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that x->tv_usec >= y->tv_usec by carrying whole
     * seconds into the usec field. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* If the usec gap exceeds one second, move whole seconds back. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/**
 * Driver for the order-1 3D 7-point stencil benchmark (Jacobi-style double
 * buffering, TESTS repetitions, minimum wall time reported).
 *
 * Fixes:
 *  - Nx/Ny/Nz/Nt were read uninitialized when fewer command-line arguments
 *    were given (UB); sensible defaults are now provided.
 *  - `min(...)` was used although only the MIN macro is defined (would not
 *    compile); replaced with MIN.
 *  - Boundary planes of both time buffers are read by the stencil but were
 *    never written; both buffers are now zero-initialized first.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Defaults (32^3 interior + halo, 10 timesteps) in case the size is not
     * fully specified on the command line. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // Two time planes, each Nz x Ny x Nx, allocated row by row.
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 24;
    tile_size[3] = 128;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Zero both buffers first so the halo planes (index 0 and N-1), which
     * the stencil reads but never writes, hold defined values. */
    for (i = 0; i < Nz; i++)
        for (j = 0; j < Ny; j++)
            for (k = 0; k < Nx; k++)
                A[0][i][j][k] = A[1][i][j][k] = 0.0;
    // reproducible pseudo-random initialization of the working region
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        // MIN is the macro defined at the top of this file (`min` does not exist)
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
main.c | #include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
#include <assert.h>
#include <math.h>
#include "mkl.h"
#include "helper.h"
#include "filehandling.h"
#include "eigenvalues.h"
#include "backtransformation.h"
void showHelp();
int main (int argc, char **argv)
{
/**********************
* initialize MPI
**********************/
int numtasks, taskid, len;
char hostname[MPI_MAX_PROCESSOR_NAME];
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
MPI_Get_processor_name(hostname, &len);
// store information, necessary to use MPI, in extra struct, that we can pass to function calls
MPIHandle mpiHandle;
mpiHandle.comm = MPI_COMM_WORLD;
mpiHandle.numtasks = numtasks;
mpiHandle.taskid = taskid;
// we don't want to use nested parallelism, because it yields to worse results, if it is not controlled by any intelligence
omp_set_nested(0);
// If we case that we want to end the program while parsing the option (for example when showing the usage hints -h),
// then I don't want to abort the program. So I need this variable to exit it properly
int endProgram = 0;
// size of tridiagonal matrix
int n;
// symmetric tridiagonal matrix T is splitted into diagonal elements D and off-diagonal elements E
double* D = NULL; // diagonal elements
double* E = NULL; // off diagonal elements
// copies of D and E, which are needed to write the output file (we need the original T)
// Note, even if a copy wastes memory, it's much faster then reading the matrix again from file later on when we need it
double *OD, *OE;
// helper variable to easily access current node in tree
EVRepNode* currNode = NULL;
// for time measurement of eigenvalue computation
double tic, toc;
// time measurement for root finding
double rsum = 0, rtic, rtoc;
// time measurement for eigenvector computation
double evsum = 0, evtic, evtoc;
// name of output file
char* outputfile = NULL;
// name of the file, where the user defines which eigenvectors we are going to compute
char* evFile = NULL;
int computeEV = 0; // flag, if there should be any eigenvectors computed at the end
int writeOutput = 0; // flag
// some indices to use in for loops
int i,j,k;
if (taskid == MASTER) {
/**********************
* parse command line arguments
**********************/
// no parameters are given, thus print usage hints and close programm
if (argc == 1) {
showHelp();
endProgram = 1;
goto StartOfAlgorithm;
}
char* inputfile = NULL;
/*
* Scheme to used as specified by option -s
*/
int usedScheme = 1;
n = 1000; // size of predefined matrix
int c;
opterr = 0;
while ((c = getopt (argc, argv, "hi:n:s:e::")) != -1)
switch (c)
{
case 'h':
showHelp();
endProgram = 1;
goto StartOfAlgorithm;
case 'i':
inputfile = optarg;
break;
case 's':
usedScheme = atoi(optarg); // keep in mind, that atoi returns 0, if string is not and integer
if (usedScheme < 1 || usedScheme > 2) {
fprintf (stderr, "Invalid argument for option -s. See help.\n");
MPI_ABORT(MPI_COMM_WORLD, 1);
}
break;
case 'n':
n = atoi(optarg);
if (n < 1) {
fprintf (stderr, "Invalid argument for option -n. See help.\n");
MPI_ABORT(MPI_COMM_WORLD, 1);
}
break;
case 'e':
computeEV = 1;
if (optarg)
evFile = optarg;
break;
case '?':
if (isprint (optopt))
fprintf (stderr, "Unknown option `-%c'.\n", optopt);
else
fprintf (stderr,
"Unknown option character `\\x%x'.\n", optopt);
MPI_ABORT(MPI_COMM_WORLD, 1);
default:
MPI_ABORT(MPI_COMM_WORLD, 1);
}
// if there are more than one positional argument
if (argc - optind > 1) {
fprintf (stderr, "Invalid number of positional arguments. See help.\n");
MPI_ABORT(MPI_COMM_WORLD, 1);
}
outputfile = argv[optind];
// print settings
if (inputfile != NULL)
printf("Input file: %s\n", inputfile);
else
printf("Use a matrix of scheme %d with dimension %d\n", usedScheme, n);
if (computeEV) {
if (evFile != NULL)
printf("Compute the eigenvectors defined in: %s\n", evFile);
else
printf("Program will compute all eigenvectors\n");
}
if (outputfile != NULL) {
writeOutput = 1;
printf("Output file: %s\n", outputfile);
}
/**********************
* read or create matrix T
**********************/
/*
* How to store the matrix?
*
* Since this program only deals with symm. tridiagonal matrices as input matrices, we store them as a special case of
* Intel's packed matrix scheme .
* A symmetric tridiagonal matrix has the same sub- and superdiagonal. So we store it in row-major layout as an n array
* of diagonal elements and an (n-1) array of off-diagonal elements.
*/
if (inputfile != NULL) { // read matrix from file
if (readSymmTriadiagonalMatrixFromSparseMTX(inputfile, &D, &E, &n) != 0)
MPI_ABORT(MPI_COMM_WORLD, 2);
} else {
switch (usedScheme) {
case 1:
createMatrixScheme1(&D, &E, n);
break;
case 2:
createMatrixScheme2(&D, &E, n);
break;
}
}
printf("\n");
printf("Number of MPI tasks is: %d\n", numtasks);
for (i = 0; i < n-1; ++i) {
assert(D[i] != 0);
assert(E[i] != 0);
}
assert(D[n-1] != 0);
// create copies of E and D
OD = malloc(n * sizeof(double));
OE = malloc((n-1) * sizeof(double));
memcpy(OD, D, n*sizeof(double));
memcpy(OE, E, (n-1)*sizeof(double));
}
StartOfAlgorithm:
//if (taskid == MASTER)
// printTridiagonalMatrix(D,E,n);
// in case the MASTER tells us we should end here
MPI_Bcast(&endProgram,1,MPI_INT,MASTER,MPI_COMM_WORLD);
if (endProgram == 1) {
MPI_Finalize();
return 0;
}
MPI_Bcast(&writeOutput,1,MPI_INT,MASTER,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
printf(" Task %d is running on node %s, which has %d available processors.\n", taskid, hostname, omp_get_num_procs());
MPI_Barrier(MPI_COMM_WORLD);
/**********************
**********************
* Cuppen's Algorithm to obtain all eigenpairs
**********************
**********************/
tic = omp_get_wtime();
MPI_Bcast(&n,1,MPI_INT,MASTER,MPI_COMM_WORLD);
/**********************
* Divide phase
**********************/
if (taskid == MASTER)
printf("Start divide phase ...\n");
/*
* The goal of the divide phase is to create a binary tree which is as balanced as possible and contains nearly equal sized leaves.
*
* Here is how the splitting works: let p = 7 (numtasks) (smallest power of two greater than 7 is 8)
* In the first stage, only the task with (taskid % 8 == 0) should perform a split (which is the MASTER).
* it should send the result to taskid + 8/2 => 4
*
* In the second stage, only tasks with taskid 8/2 = 4 should perform a split => 0, 4.
* The results are send to nodes with taskid: sender_taskid+4/2 => 2, 6
*
* In the third stage, only tasks with taskid 4/2 = 2 should perform a split => 0, 2, 4, 6. (6 can't perform a split)
* The results are send to nodes with taskid: sender_taskid+2/2 => 1,2,3,4,5,6,7
*
* Note, since 2^(k-1) < p <= 2^k, the minimum depth of each binary tree is k. Our tree will always have depth k.
*/
/*
* How to split the matrix. Assume:
* D = [1,2,3,4,5,6,7,8]
* E = [a,b,c,d,e,f,g]
* If we want to split between 4 and 5, the we want to have the following matrices
* E1 = [a,b,c], E2 = [e,f,g]
* The off diagonal element d would be the beta, which we have to eliminate in the splitting process.
* So, the indices of the off-diagonals in the splitted matrix have the same start index as the diagonal elements but have one less element
*
* So, we split T into T1, T2 (note, the difference in notation: T1 and T2 have an hat on top in the book).
* The last diagonal element in T1 differs from the orignal part by subtracting theta * beta.
* The first diagonal element in T2 differs from the orignal part by subtracting theta^-1 * beta.
*/
// all tasks that have zero remainder when computing (taskid % modulus) do a split in the current stage
// find smallest power of two greater than numtasks
int modulus = 1;
int maxModulus = 1;
// depth of the tree, thus the tree has 'depth' stages with at maximum 2^(depth-1) leaves
int treeDepth = 1;
while (maxModulus < numtasks) {
maxModulus *= 2;
treeDepth++;
}
modulus = maxModulus;
// helper variable
int numSplitStages = treeDepth-1; // number of stages, where splits are performed
/*
* In this struct we store the whole information to reconstruct the eigenvectors.
* Thus, for each node in the tree, the struct must store the information of
* either the whole eigenvector matrix of its T (if leave node) or the vectors
* representing the eigenvector matrix of the rank-one perturbation of its T,
* where T is the tridiagonal matrix assigned to a node in the tree.
*
* Of course, each of these eigenvector matrix representation is only stored in one task.
* If it's not stored on the current node, then we store at least the taskid, which knows,
* where it is stored for all our child nodes.
* Hence, only the MASTER node (root of tree) can reconstruct the whole eigenvector matrix.
*/
EVRepTree evTree = initEVRepTree(treeDepth, numtasks, n);
if (taskid == MASTER)
assert(evTree.t[0].s[0].n == n);
// If this task performs a split, then it stores the taskid of the right child in this array (the left child is the task itself)
// If there is no split, then the value is -1
int rightChild[numSplitStages];
// If this task was splitted in the stage before, then we store here the taskid of the parent, else -1
// More precisely, if there was a split involving this node at stage s, the we store in parent[s] the taskid of the node performing the split
int parent[numSplitStages];
// initialize above arrays
#pragma omp parallel for default(shared) private(i) schedule(static)
for (i = 0; i < numSplitStages; ++i) {
rightChild[i] = -1;
parent[i] = -1;
}
// Note, our goal is to have equally sized leaves
int leafSize, sizeRemainder;
if (taskid == 0) {
leafSize = n / numtasks;
sizeRemainder = n % numtasks;
}
MPI_Bcast(&leafSize,1,MPI_INT,MASTER,MPI_COMM_WORLD);
MPI_Bcast(&sizeRemainder,1,MPI_INT,MASTER,MPI_COMM_WORLD);
if (taskid == MASTER) {
if (leafSize == 0) {
fprintf (stderr, "Leaf Size is too small! Reduce number of tasks.\n");
MPI_ABORT(MPI_COMM_WORLD, 4);
}
printf("Average leaf size will be %.1lf\n", n*1.0/numtasks);
}
// the actual leafsize of the current task
int nl = leafSize + (taskid < sizeRemainder ? 1 : 0); // FIXME: delete me (stored in tree)
// helper variables
// size of T in left resp. right subtree
int n1,n2;
// stage in divide tree
int s = 0;
for (s = 0; modulus > 1; s++) {
assert(s < treeDepth);
// if task is to perform a split of T (Note: in the first stage, only MASTER statisfies the condition
if (taskid % modulus == 0) {
// get current node in tree
currNode = accessNode(&evTree, s, taskid);
rightChild[s] = taskid + modulus/2;
parent[s] = taskid; // left child will stay on this node
n1 = currNode->left->n;
n2 = currNode->right->n;
if (currNode->left != currNode->right) { // an actual split is performed
assert(n2 > 0);
assert(currNode->left->taskid != currNode->right->taskid);
//printf("Task %d: Splits into (Task %d: %d; Task %d: %d) in stage %d\n", taskid, taskid, n1, rightChild[s], n2, s);
assert(s == 0 || parent[s-1] == currNode->parent->taskid);
assert(rightChild[s] == currNode->right->taskid);
// at each split that we perform, we have to keep track of the lost beta entry
// save beta for later conquer phase and modify diagonal elements
currNode->beta = E[n1-1];
/* Chose the theta value */
// last diagonal element of T1 and first of T2
double dl = D[n1-1];
double df = D[n1];
// if dl and df have the same sign
if ((dl > 0 && df > 0) || (dl < 0 && df < 0)) {
// choose theta, such that -theta*beta has the same sign as df,dl
if ((dl * (-currNode->beta)) < 0) // dl,df and -beta have not the same sign
currNode->theta = -1;
else
currNode->theta = 1;
} else {
// choose sign of theta, such that -theta*beta has the same sign as dl
if ((dl * (-currNode->beta)) < 0) // dl and -beta have not the same sign
currNode->theta = -1;
else
currNode->theta = 1;
// choose magnitude of theta, such that severe digit loss is avoided when computing df - beta/theta
// if |beta| < |df| => make beta/theta smaller than beta
if (fabs(currNode->beta) < fabs(df)) // TODO: overflow controls
currNode->theta = 1000*currNode->beta;
else
currNode->theta = currNode->beta / 1000;
}
// modify last diagonal element of T1
D[n1-1] -= currNode->theta * currNode->beta;
// modify first diagonal element of T2
D[n1] -= 1.0/currNode->theta * currNode->beta;
// send size and second half of matrix
MPI_Send(&n2, 1, MPI_INT, rightChild[s], 1, MPI_COMM_WORLD);
MPI_Send(D+n1, n2, MPI_DOUBLE, rightChild[s], 2, MPI_COMM_WORLD);
MPI_Send(E+n1, n2-1, MPI_DOUBLE, rightChild[s], 3, MPI_COMM_WORLD);
} else {
rightChild[s] = -1;
}
}
// if task is receiver of a subtree in this step
if (taskid % modulus != 0 && taskid % (modulus/2) == 0) {
parent[s] = taskid-modulus/2; // I receive the right child produced in this stage, which I will split in the next stage
// receive size of matrix to receive
MPI_Recv(&n, 1, MPI_INT, parent[s], 1, MPI_COMM_WORLD, &status);
// receive matrix
assert(D == NULL && E == NULL);
D = malloc(n * sizeof(double));
E = malloc((n-1) * sizeof(double));
MPI_Recv(D, n, MPI_DOUBLE, parent[s], 2, MPI_COMM_WORLD, &status);
MPI_Recv(E, n-1, MPI_DOUBLE, parent[s], 3, MPI_COMM_WORLD, &status);
}
modulus /= 2;
}
/*
* Some final remarks of the divide phase:
* The size of the current leave is in nl.
* The actual allocated memory is still stored in n (which will be needed in conquer phase)
*/
MPI_Barrier(MPI_COMM_WORLD);
/**********************
* Compute eigenpairs of leaves using QR algorithm
**********************/
if (taskid == MASTER)
printf("Apply QR algorithm on leaves ...\n");
// TODO. make depth of tree big enough to assure that dense matrix Q of leaves can be stored (thus probably split T even on nodes itself)
// get current leaf node in tree
currNode = &(evTree.t[treeDepth-1].s[taskid]);
assert(currNode->n == nl);
// assign D to leaf node
if (n > nl) {
/* since we have to store all D's anyway, we don't wanna store an array that is bigger than
* necessary. We could have shrinked D in the splitting step before, but
* that would have probably caused even more copy operations
*/
currNode->D = malloc(nl*nl * sizeof(double));
memcpy(currNode->D, D, nl*sizeof(double));
myfree(&D);
} else {
assert(nl == n);
currNode->D = D;
D = NULL;
}
// orthonormal where the columns are eigenvectors
currNode->Q = malloc(nl*nl * sizeof(double));
int ret = LAPACKE_dsteqr(LAPACK_ROW_MAJOR, 'I', nl, currNode->D, E, currNode->Q, nl);
assert(ret == 0);
// reset D to L
currNode->L = currNode->D;
double* L = currNode->L; // the reason why I store extra, is because this is easier then find later on the right child, which sends it to the parent node
currNode->D = NULL;
// off-diagonal elements are not needed anymore
myfree(&E);
assert(E == NULL);
// upper stage in the tree only needs first and last line (as explained later)
double* Q1f = currNode->Q; // first row
double* Q1l = currNode->Q + (nl-1)*nl; // last row
// if there was not a single split, then we only need the eigenvectors stored in Q
if (numSplitStages == 0)
goto EndOfAlgorithm;
MPI_Barrier(MPI_COMM_WORLD);
/**********************
* Conquer phase
**********************/
if (taskid == MASTER)
printf("Start Conquer Phase ...\n");
// sizes of Q matrices
int nq1 = nl, nq2;
double *Q2f = NULL, *Q2l = NULL;
int performedMerge = 0; // If I haven't performed a merge yet, then I am not allowed to free Q1l,Q1f, because they still point to the leaf node
// Stage s=numSplitStages-1: stage where leaves are merged (since s=0 is first split stage)
assert(numSplitStages > 0);
for (s = numSplitStages-1; s >= 0; s--) {
currNode = currNode->parent;
assert(currNode != NULL);
// the tree is not completely balanced, so there might be tasks, that haven't performed a split at the bottom stage s
// if task should not compute the spectral decomposition of two leaves
if (currNode->taskid != taskid && currNode->right->taskid == taskid) {
//printf("Task %d: Send info to %d in stage %d\n", taskid, currNode->taskid, s);
// send eigenvalues and necessary part of eigenvectors to parent node in tree
MPI_Send(&nq1, 1, MPI_INT, currNode->taskid, taskid*numtasks+4, MPI_COMM_WORLD);
MPI_Send(L, nq1, MPI_DOUBLE, currNode->taskid, taskid*numtasks+5, MPI_COMM_WORLD);
MPI_Send(Q1f, nq1, MPI_DOUBLE, currNode->taskid, taskid*numtasks+6, MPI_COMM_WORLD);
MPI_Send(Q1l, nq1, MPI_DOUBLE, currNode->taskid, taskid*numtasks+7, MPI_COMM_WORLD);
// this task can't be the master, so there is no work left to do for it
if (performedMerge) { // note, that the leaf nodes don't copy elements into Q1l,Q1f
myfree(&Q1f);
myfree(&Q1l);
} else {
Q1f = NULL;
Q1l = NULL;
}
}
// if task combines two splits in this stage
// Note: if right == left, then the tree was not splitted in this stage (single path)
if (currNode->right != currNode->left) {
// for all tasks, that are leaves of the current node, they can work in parallel on the root finding problem
if (taskid >= currNode->taskid && taskid < (currNode->taskid+currNode->numLeaves)) {
if (currNode->taskid == taskid) {
performedMerge = 1;
// get current node in tree
EVRepNode* leftChild = currNode->left;
assert(leftChild != NULL && leftChild->n == nq1 && leftChild->taskid == taskid);
int rtaskid = currNode->right->taskid; // taskid of right child
// receive size of matrix to receive
MPI_Recv(&nq2, 1, MPI_INT, rtaskid, rtaskid*numtasks+4, MPI_COMM_WORLD, &status);
assert(currNode->n == nq1+nq2);
currNode->D = malloc(currNode->n * sizeof(double));
memcpy(currNode->D, leftChild->L, currNode->n*sizeof(double));
// receive eigenvalues and necessary part of eigenvectors from right child in tree
MPI_Recv(currNode->D+nq1, nq2, MPI_DOUBLE, rtaskid, rtaskid*numtasks+5, MPI_COMM_WORLD, &status);
Q2f = malloc(nq2 * sizeof(double));
Q2l = malloc(nq2 * sizeof(double));
MPI_Recv(Q2f, nq2, MPI_DOUBLE, rtaskid, rtaskid*numtasks+6, MPI_COMM_WORLD, &status);
MPI_Recv(Q2l, nq2, MPI_DOUBLE, rtaskid, rtaskid*numtasks+7, MPI_COMM_WORLD, &status);
//printf("Task %d: Conquer from (Task %d: %d; Task %d: %d)\n", taskid, taskid, nq1, currNode->right->taskid, nq2);
/*
* Compute z, where z is
*
* z = | Q1^T 0 | | e_k |
* | 0 Q2^T | | theta^-1 * e_1 |
*
* Note, I only need the last row of Q1 and the first row of Q2 in order to compute z
*/
currNode->z = computeZ(Q1l, Q2f, nq1, nq2, currNode->theta);
}
// compute root finding in parrallel
// compute eigenvalues lambda_1 of rank-one update: D + beta*theta* z*z^T
// Note, we may not overwrite the diagonal elements in D with the new eigenvalues, since we need those diagonal elements to compute the eigenvectors
rtic = omp_get_wtime();
computeEigenvalues(currNode, mpiHandle);
// if (currNode->taskid == taskid) { // just to debug
// currNode->L = malloc(currNode->n * sizeof(double));
// memcpy(currNode->L, currNode->D, currNode->n*sizeof(double));
// currNode->G = malloc(currNode->n * sizeof(int));
// for (k = 0; k < currNode->n; ++k)
// currNode->G[k] = -1;
// }
rtoc = omp_get_wtime();
rsum += (rtoc - rtic);
if (currNode->taskid == taskid) {
L = currNode->L;
// compute normalization factors
evtic = omp_get_wtime(); // mainly ev extraction task
computeNormalizationFactors(currNode);
evtoc = omp_get_wtime();
evsum += evtoc - evtic;
// printVector(currNode->L, currNode->n);
// printVector(currNode->N, currNode->n);
// printVector(currNode->D, currNode->n);
// printVector(currNode->z, currNode->n);
/*
* It holds that T = W L W^T, where W = QU
* We only have to compute the first and last row of W and send it to the parent
*
* left child: the parent needs the last row of W (which is Q1 in parent) to compute z.
* To compute the last row of W we only need the last row of Q (last row of Q2)
*
* right child: the parent needs the first row of W (which is Q2 in parent) to compute z.
* To compute the first row of W we only need the first row of Q (first row of Q1)
*
* But, the parent has (if s > 1) to compute the last and first row of its W again, so it needs
* also the first row of its Q1 from its left child resp. the last row of its Q2 from the right child
*/
if (s == 0) { // if we already reached root of tree
assert(taskid == MASTER);
// write eigenvalues into file
if (s < numSplitStages-1) { // note, that the leaf nodes don't copy elements into Q1l,Q1f
myfree(&Q1f);
myfree(&Q1l);
} else {
Q1f = NULL;
Q1l = NULL;
}
myfree(&Q2f);
myfree(&Q2l);
goto EndOfAlgorithm;
}
// compute first and last row of W
double* Wf = malloc((nq1+nq2) * sizeof(double)); // first line of W
double* Wl = malloc((nq1+nq2) * sizeof(double)); // last line of W
#pragma omp parallel private(i,j) // parallel region to ensure, that each thread has another array allocated for the eigenvector
{
// store i-th eigenvector of U
double* ev = malloc(currNode->n * sizeof(double));
#pragma omp for private(evtic, evtoc) reduction(+:evsum)
for (i = 0; i < nq1+nq2; ++i) {
// get i-th eigenvector of U
evtic = omp_get_wtime();
getEigenVector(currNode, ev, i);
evtoc = omp_get_wtime();
if (omp_get_thread_num() == 0) // we want to measure the sequential runtime, not the runtime accumulated from each task
evsum += evtoc - evtic;
Wf[i] = 0;
for (j = 0; j < nq1; ++j)
Wf[i] += Q1f[j] * ev[j];
Wl[i] = 0;
for (j = 0; j < nq2; ++j)
Wl[i] += Q2l[j] * ev[nq1+j];
}
free(ev);
}
// if (s==0) {
// printVector(Wf, nq1+nq2);
// printVector(Wl, nq1+nq2);
// goto EndOfAlgorithm;
// }
if (s < numSplitStages-1) { // note, that the leaf nodes don't copy elements into Q1l,Q1f
myfree(&Q1f);
myfree(&Q1l);
} else {
Q1f = NULL;
Q1l = NULL;
}
myfree(&Q2f);
myfree(&Q2l);
// update variables for next iteration
nq1 = nq1 + nq2;
Q1f = Wf;
Q1l = Wl;
Wf = NULL;
Wl = NULL;
}
}
}
}
/**********************
* End of algorithm
**********************/
EndOfAlgorithm:
toc = omp_get_wtime();
if (taskid == MASTER) {
double elapsedTime = toc-tic;
printf("\n");
printf("Required time to compute all eigenvalues: %f seconds\n", elapsedTime);
printf("Required time for root finding: %f seconds; fraction: %.1f%%\n", rsum, 100*rsum/elapsedTime);
printf("Required time for eigenvector extraction from U_i's: %f seconds; fraction: %.1f%%\n", evsum, 100*evsum/elapsedTime);
}
if (writeOutput) {
if (taskid == MASTER) {
printf("\n");
printf("Write results to file ...\n");
}
MPI_Barrier(MPI_COMM_WORLD);
writeResults(outputfile,OD,OE,&evTree, mpiHandle, computeEV, evFile);
}
freeEVRepTree(&evTree);
//MPI_Barrier(MPI_COMM_WORLD);
if (taskid == MASTER)
printf("\nProgram finished successfully!\n");
MPI_FINALIZE();
return 0;
}
/**
* @brief showHelp Show usage details to the user
*/
void showHelp() {
    // Prints the command-line usage summary to stdout. Output strings are
    // user-facing and intentionally left exactly as shipped.
    printf("\n");
    printf("USAGE cuppens [options] [outputfile]\n");
    printf("\n");
    printf("The program can compute all the eigenpairs of a matrix on a parallel machine\n");
    printf("by using cuppens algorithm\n");
    printf("The results can be written into an outputfile, if specified.\n");
    printf("\n");
    printf("OPTIONS\n");
    printf(" -h\n");
    printf(" Show help.\n");
    printf(" -i FILENAME\n");
    printf(" The name of a file which contains a tridiagonal matrix in mtx format.\n");
    printf(" The eigenvalues of this matrix will then be computed.\n");
    printf(" -s NUM\n");
    printf(" If you want to compute the eigenvalues of a predefined matrix, you may\n");
    printf(" use this option to define the scheme of the matrix.\n");
    printf(" 1 - Matrix will have the tridiagonal form [-1,d_i,-1] where the diagonal\n");
    printf(" elements will be evenly spaced in the interval [1,100] \n");
    printf(" 2 - Eigenvalue i has the form: 2 + 2*cos((PI*i)/(n+1)) \n");
    printf(" Poisson-matrix (tridiagonal form of [-1,2-1])\n");
    printf(" If option i is used, then this option will be ignored.\n");
    printf(" -n NUM\n");
    printf(" Specify the dimension of the matrix chosen with option -s.\n");
    printf(" -e(FILENAME)\n");
    printf(" Without this option, no eigenvectors are computed, just the eigenvalues.\n");
    printf(" If you just specify the flag -e, then all eigenvectors will be computed.\n");
    printf(" If you specify additionally a filename, then it will read the indices\n");
    printf(" of the eigenvectors to compute from this file (each line one index).\n");
    printf(" Note, there is no blank between the option and the filename.\n");
    printf("\n");
}
|
LRUCache_Prefetch.h | #include <iostream>
#include <stdint.h>
#include <cmath>        // ceil (calAioreadinfo)
#include <cstdlib>      // free; posix_memalign (POSIX, comes in via <stdlib.h>)
#include <unordered_map>
#include <vector>
using namespace std;
// Global table of posting-list byte offsets: List_offset[t+1] - List_offset[t]
// is the on-disk byte length of term t's list (see calAioreadinfo).
vector<int64_t>List_offset;
// Descriptor for one aligned asynchronous read of a term's posting list.
struct AIOReadInfo
{
	int64_t readlength;      // bytes to read, rounded up to READ_BLOCK
	int64_t readoffset;      // file offset, rounded down to DISK_BLOCK
	int64_t listlength;      // true byte length of the posting list
	int64_t offsetForenums;  // slack between readoffset and the real list start
	int64_t memoffset;       // in-memory offset (always 0 in this header)
	int64_t curSendpos;      // consumer cursor; starts at -offsetForenums
	uint8_t *list_data;      // posix_memalign'ed buffer owned by the cache
	uint32_t termid;         // term id, the cache key for this entry
};
// Per-term read cursor; set negative in calAioreadinfo while the head of the
// list is still in flight, reset to offsetForenums on eviction.
vector<int64_t>curReadpos;
// Per-term pin count: entries with usedFreq > 0 must not be evicted.
vector<int64_t>usedFreq;
const uint64_t DISK_BLOCK = 4096;      // alignment unit for read offsets
const int64_t READ_BLOCK = 64 * 1024;  // granularity of read lengths
// Doubly linked LRU-list node owning one cached read.
struct Node{
	AIOReadInfo aiodata;
	Node*prev, *next;
};
// Total cache capacity in bytes (mutable so callers can tune it).
int64_t CACHE_SIZE = 1024 * 1024;
// LRU cache of aligned disk-read buffers keyed by term id. Eviction walks
// from the tail (least recently used) and skips entries pinned through the
// global usedFreq table.
class LRUCache{
public:
	LRUCache();
	~LRUCache();
	Node* Put(unsigned key);                      // insert; rescans until space frees up
	Node* Get(unsigned key, bool& flag);          // lookup; flag reports hit/miss
	Node* Put_Prefetch(unsigned key);             // insert; gives up instead of rescanning
	Node* Get_Prefetch(unsigned key, bool& flag); // lookup for the prefetch path
	void print();                                 // dump total resident list bytes
	uint64_t hit_size;    // NOTE(review): never updated anywhere in this header
	uint64_t miss_size;   // bytes requested across all misses
	uint64_t hit_count;   // hits observed via Get()
	uint64_t miss_count;  // misses via Get()/Get_Prefetch()
	void attach(Node *node);  // link node at the MRU position
	void detach(Node *node);  // unlink node from the list
	AIOReadInfo calAioreadinfo(unsigned term);
	unordered_map<unsigned, Node*>hashmap_;  // term id -> resident node
	Node*head_, *tail_;  // sentinel nodes delimiting the LRU list
	int64_t sumBytes;    // bytes currently resident in the cache
};
LRUCache::LRUCache()
{
	// Start with zeroed statistics and an empty doubly linked list
	// delimited by two sentinel nodes, so attach()/detach() never have
	// to special-case null neighbours.
	hit_size = miss_size = 0;
	hit_count = miss_count = 0;
	sumBytes = 0;
	head_ = new Node;
	tail_ = new Node;
	head_->prev = NULL;
	head_->next = tail_;
	tail_->next = NULL;
	tail_->prev = head_;
}
LRUCache::~LRUCache()
{
	// Release every cached entry before the sentinels: the original
	// destructor deleted only head_/tail_, leaking all resident nodes
	// and their posix_memalign'ed data buffers.
	Node *cur = head_->next;
	while (cur != tail_)
	{
		Node *next = cur->next;
		free(cur->aiodata.list_data);
		delete cur;
		cur = next;
	}
	delete head_;
	delete tail_;
}
// Build the aligned-read descriptor for `term`'s posting list: the file
// offset is rounded down to a DISK_BLOCK boundary and the length rounded up
// to a multiple of READ_BLOCK, so the read covers the whole list plus the
// leading alignment slack.
AIOReadInfo LRUCache::calAioreadinfo(unsigned term)
{
	AIOReadInfo tmpaio;
	tmpaio.termid = term;
	// List extent from the global offset table.
	int64_t listlength = List_offset[term + 1] - List_offset[term];
	tmpaio.listlength = listlength;
	tmpaio.memoffset = 0;
	int64_t offset = List_offset[term];
	// Align the read start down to a disk block.
	tmpaio.readoffset = ((int64_t)(offset / DISK_BLOCK))*DISK_BLOCK;
	// Slack bytes between the aligned start and the real list start.
	tmpaio.offsetForenums = offset - tmpaio.readoffset;
	int64_t readlength = ((int64_t)(ceil((double)(listlength + tmpaio.offsetForenums) / READ_BLOCK)))*READ_BLOCK;
	tmpaio.readlength = readlength;
	// A negative cursor marks "no real list bytes delivered yet".
	tmpaio.curSendpos = -tmpaio.offsetForenums;
	curReadpos[term] = -tmpaio.offsetForenums;
	// Publish the cursor update to other OpenMP threads.
#pragma omp flush(curReadpos)
	miss_size += tmpaio.listlength; // every call corresponds to a cache miss
	return tmpaio;
}
// Insert the posting list for `key`, evicting least-recently-used unpinned
// entries (usedFreq == 0) until the new block fits.
// Returns NULL if the block can never fit or the aligned allocation fails.
Node* LRUCache::Put(unsigned key)
{
	AIOReadInfo tmpaio = calAioreadinfo(key);
	Node *node;
	if (tmpaio.readlength> CACHE_SIZE)
	{
		cout << "That block overflow!!" << endl;
		return NULL;
	}
	node = tail_->prev;
	while (sumBytes + tmpaio.readlength>CACHE_SIZE)
	{
		// NOTE(review): when every resident entry is pinned, this rescans
		// the list forever; callers must guarantee pins are released.
		if (node == head_){ node = tail_->prev; }
#pragma omp flush(usedFreq)
		if (usedFreq[node->aiodata.termid] > 0){ node = node->prev; continue; }
		detach(node);
		free(node->aiodata.list_data);
		// Reset the read cursor so a future miss re-reads from disk.
		curReadpos[node->aiodata.termid] = node->aiodata.offsetForenums;
		sumBytes -= node->aiodata.readlength;
		hashmap_.erase(node->aiodata.termid);
		Node *tmp = node->prev;
		delete node;
		node = tmp;
	}
	node = new Node();
	// Bug fix: the posix_memalign result was ignored, so a failed
	// allocation left a garbage buffer pointer in the cache.
	if (posix_memalign((void**)&tmpaio.list_data, DISK_BLOCK, tmpaio.readlength) != 0)
	{
		delete node;
		return NULL;
	}
	node->aiodata = tmpaio;
	sumBytes += tmpaio.readlength;
	attach(node);
	hashmap_[key] = node;
	return node;
}
// Prefetch variant of Put(): performs one eviction sweep from the LRU end
// and gives up (returns NULL) if that sweep cannot free enough space,
// instead of rescanning until pins are released.
Node* LRUCache::Put_Prefetch(unsigned key)
{
	AIOReadInfo tmpaio = calAioreadinfo(key);
	Node *node;
	if (tmpaio.readlength> CACHE_SIZE)
	{
		cout << "That block overflow!!" << endl;
		return NULL;
	}
	node = tail_->prev;
	while (sumBytes + tmpaio.readlength>CACHE_SIZE&&node != head_)
	{
#pragma omp flush(usedFreq)
		if (usedFreq[node->aiodata.termid] > 0){ node = node->prev; continue; }
		detach(node);
		free(node->aiodata.list_data);
		curReadpos[node->aiodata.termid] = node->aiodata.offsetForenums;
		sumBytes -= node->aiodata.readlength;
		hashmap_.erase(node->aiodata.termid);
		Node *tmp = node->prev;
		delete node;
		node = tmp;
	}
	// Bug fix: the original tested `node == head_`, which also fails when
	// the cache is empty but has ample room (tail_->prev == head_ before
	// any insertion). Test the actual space condition instead.
	if (sumBytes + tmpaio.readlength > CACHE_SIZE)
	{
		return NULL;
	}
	node = new Node();
	// Bug fix: check the posix_memalign result instead of ignoring it.
	if (posix_memalign((void**)&tmpaio.list_data, DISK_BLOCK, tmpaio.readlength) != 0)
	{
		delete node;
		return NULL;
	}
	node->aiodata = tmpaio;
	sumBytes += tmpaio.readlength;
	attach(node);
	hashmap_[key] = node;
	return node;
}
// Look up `key`; on a hit the node is promoted to the MRU position,
// on a miss it is inserted via Put(). `flag` reports hit (true) / miss.
Node* LRUCache::Get(unsigned key, bool &flag)
{
	auto it = hashmap_.find(key);
	if (it == hashmap_.end())
	{
		flag = false;
		miss_count++;
		return Put(key);
	}
	flag = true;
	hit_count++;
	Node *found = it->second;
	detach(found);
	attach(found);
	return found;
}
// Prefetch lookup: like Get() but falls back to Put_Prefetch() on a miss,
// which refuses to wait for pinned entries to be released.
// NOTE(review): unlike Get(), a hit here does not increment hit_count --
// presumably prefetch hits are deliberately excluded from the hit
// statistics; confirm against the consumers of hit_count.
Node* LRUCache::Get_Prefetch(unsigned key, bool &flag)
{
	Node *node;
	unordered_map<unsigned, Node* >::iterator it = hashmap_.find(key);
	if (it != hashmap_.end())
	{
		node = it->second;
		flag = true;
		// Promote to the MRU position.
		detach(node);
		attach(node);
	}
	else
	{
		flag = false;
		miss_count++;
		node = Put_Prefetch(key); // may return NULL when the cache is saturated
	}
	return node;
}
// Link `node` immediately after the head sentinel (the MRU position).
void LRUCache::attach(Node *node)
{
	node->next = head_->next;
	head_->next = node;
	node->next->prev = node;
	node->prev = head_;
}
// Unlink `node` from the LRU list; the node itself is not freed here.
void LRUCache::detach(Node *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
}
// Report the total uncompressed posting-list bytes currently resident.
void LRUCache::print()
{
	int64_t total = 0;
	for (auto &entry : hashmap_)
		total += entry.second->aiodata.listlength;
	cout << "sumsize=" << total << endl;
}
|
omp_task_nest_tied.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
// Parallel Fibonacci using nested tied OpenMP tasks.
// Sub-problems with n < 4 recurse serially to cap task-creation overhead;
// larger ones spawn one task per branch and join at the taskwait before
// combining the shared results a and b.
int fib(int n) {
  int a, b;
  if (n < 2) {
    return n;
  } else {
    if(n < 4) {
      return fib(n - 1) + fib(n - 2);
    } else {
#pragma omp task shared(a)
      {
        a = fib(n - 1);
      }
#pragma omp task shared(b)
      {
        b = fib(n - 2);
      }
#pragma omp taskwait
      return a + b;
    }
  }
}
/* Sequential reference Fibonacci used to validate the task-parallel fib(). */
int fib_seq(int n) {
  if (n < 2)
    return n;
  return fib_seq(n - 1) + fib_seq(n - 2);
}
// Test driver: repeatedly compares the task-parallel fib() against the
// sequential reference and returns the number of mismatching repetitions
// (0 = pass). REPETITIONS comes from omp_testsuite.h.
int main() {
  int i;
  int n = 20;
  int num_failed = 0;
  for(i = 0; i < REPETITIONS; i++) {
    int task_val = 0;
    int seq_val = fib_seq(n);
    // A single (master) thread seeds the task tree; the whole team
    // participates in executing the generated tasks.
#pragma omp parallel shared(task_val) firstprivate(n)
#pragma omp master
    {
      task_val = fib(n);
    }
    if(seq_val != task_val) {
      printf("[%d] Failed: fib(%d) = %d (ANS = %d)\n", i, n, task_val, seq_val);
      num_failed++;
    }
  }
  return num_failed;
}
|
bml_normalize_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_normalize.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellpack.h"
#include "bml_allocate_ellpack.h"
#include "bml_normalize_ellpack.h"
#include "bml_scale_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Normalize ellpack matrix given Gershgorin bounds.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param mineval Calculated min value
* \param maxeval Calculated max value
*/
void TYPED_FUNC(
    bml_normalize_ellpack) (
    bml_matrix_ellpack_t * A,
    double mineval,
    double maxeval)
{
    /* In place, A' = (maxeval*I - A) / (maxeval - mineval): an eigenvalue
     * lambda in [mineval, maxeval] maps to (maxeval-lambda)/(maxeval-mineval)
     * in [0, 1]. */
    double maxminusmin = maxeval - mineval;
    double gershfact = maxeval / maxminusmin;
    /* NOTE(review): divides by zero when maxeval == mineval; callers must
     * guarantee a non-degenerate interval -- TODO confirm. */
    REAL_T scalar = (REAL_T) - 1.0 / maxminusmin;
    double threshold = 0.0;
    bml_scale_inplace_ellpack(&scalar, A);
    bml_add_identity_ellpack(A, gershfact, threshold);
}
/* Per-row absolute off-diagonal sums of an ellpack matrix.
 * Returns a freshly allocated REAL_T[N]; entry i is sum_j |A_ij| over the
 * stored entries of row i, including the diagonal iff include_diag != 0.
 * Caller owns (and must free) the returned array. */
void *TYPED_FUNC(
    bml_accumulate_offdiag_ellpack) (
    bml_matrix_ellpack_t * A,
    int include_diag)
{
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    /* NOTE(review): raw calloc (result unchecked) while sibling routines
     * use bml_allocate_memory -- confirm which allocator is intended. */
    REAL_T *offdiag_sum = calloc(N, sizeof(REAL_T));
    REAL_T *A_value = (REAL_T *) A->value;
    for (int i = 0; i < N; i++)
    {
        double radius = 0.0;
        for (int j = 0; j < A_nnz[i]; j++)
        {
            int ind = ROWMAJOR(i, j, N, M);
            /* Diagonal entries contribute only when include_diag is set. */
            if ((i != A_index[ind]) || include_diag)
                radius += (double) ABS(A_value[ind]);
        }
        offdiag_sum[i] = radius;
    }
    return offdiag_sum;
}
/** Calculate Gershgorin bounds for an ellpack matrix.
*
* \ingroup normalize_group
*
* \param A The matrix
* returns mineval Calculated min value
* returns maxeval Calculated max value
*/
void *TYPED_FUNC(
    bml_gershgorin_ellpack) (
    bml_matrix_ellpack_t * A)
{
    REAL_T radius, absham, dvalue;
    double emin = DBL_MAX;
    /* Bug fix: DBL_MIN is the smallest *positive* double, not the most
     * negative one, so initializing the running maximum with it silently
     * clamps any spectrum whose upper Gershgorin bound is negative.
     * -DBL_MAX is the correct identity for a running maximum. */
    double emax = -DBL_MAX;
    /* Result buffer {emin, emax}; ownership passes to the caller. */
    double *eval = bml_allocate_memory(sizeof(double) * 2);
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int myRank = bml_getMyRank();
    REAL_T rad[N];
    REAL_T dval[N];
    REAL_T *A_value = (REAL_T *) A->value;
#ifdef USE_OMP_OFFLOAD
#pragma omp target update from(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#endif
#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        /* Diagonal entry and off-diagonal absolute row sum (disc radius)
         * for each row owned by this rank. */
        radius = 0.0;
        dvalue = 0.0;
        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }
        dval[i] = dvalue;
        rad[i] = radius;
    }
    /* Reduce the per-row discs [d_i - r_i, d_i + r_i] to local bounds. */
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }
#ifdef DO_MPI
    /* Combine bounds across ranks for distributed matrices. */
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_minRealReduce(&emin);
        bml_maxRealReduce(&emax);
    }
#endif
    eval[0] = emin;
    eval[1] = emax;
    return eval;
}
/** Calculate Gershgorin bounds for a partial ellpack matrix.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param nrows Number of rows to use
* returns mineval Calculated min value
* returns maxeval Calculated max value
*/
void *TYPED_FUNC(
    bml_gershgorin_partial_ellpack) (
    bml_matrix_ellpack_t * A,
    int nrows)
{
    REAL_T radius, absham, dvalue;
    double emin = DBL_MAX;
    /* Bug fix: -DBL_MAX, not DBL_MIN (the smallest positive double), is
     * the correct identity for a running maximum; the old value clamped
     * negative upper bounds. */
    double emax = -DBL_MAX;
    /* Result buffer {emin, emax}; ownership passes to the caller. */
    double *eval = bml_allocate_memory(sizeof(double) * 2);
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    REAL_T rad[N];
    REAL_T dval[N];
    REAL_T *A_value = (REAL_T *) A->value;
#ifdef USE_OMP_OFFLOAD
#pragma omp target update from(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#endif
#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    for (int i = 0; i < nrows; i++)
    {
        /* Diagonal entry and off-diagonal absolute row sum (disc radius)
         * for the first nrows rows only. */
        radius = 0.0;
        dvalue = 0.0;
        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }
        dval[i] = dvalue;
        rad[i] = radius;
    }
    /* Reduce the per-row discs [d_i - r_i, d_i + r_i] to global bounds. */
    for (int i = 0; i < nrows; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }
    eval[0] = emin;
    eval[1] = emax;
    return eval;
}
|
re_model_template.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_RE_MODEL_TEMPLATE_H_
#define GPB_RE_MODEL_TEMPLATE_H_
#define _USE_MATH_DEFINES // for M_PI
#include <cmath>
#include <GPBoost/log.h>
#include <GPBoost/type_defs.h>
#include <GPBoost/re_comp.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <GPBoost/Vecchia_utils.h>
#include <GPBoost/GP_utils.h>
//#include <Eigen/src/misc/lapack.h>
#include <memory>
#include <mutex>
#include <vector>
#include <algorithm> // std::shuffle
#include <random> // std::default_random_engine
//#include <typeinfo> // Only needed for debugging
#include <chrono> // only needed for debugging
#include <thread> // only needed for debugging
//Log::Info("Fine here ");// Only for debugging
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
namespace GPBoost {
/*!
* \brief Template class used in the wrapper class REModel
* The template parameters T1 and T2 can either be <sp_mat_t, chol_sp_mat_t> or <den_mat_t, chol_den_mat_t>
*/
template<typename T1, typename T2>
class REModelTemplate {
public:
/*! \brief Null costructor */
REModelTemplate();
/*!
* \brief Costructor
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of random effects / Gaussian processes (same values = same process realization)
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param num_re_group Number of grouped (intercept) random effects
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting start at 1.
* \param num_re_group_rand_coef Number of grouped random coefficient
* \param num_gp Number of (intercept) Gaussian processes
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian process. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rassmusen and Williams (2006)
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance, irrelevant for some covariance functions such as the exponential or Gaussian)
* \param vecchia_approx If true, the Veccia approximation is used for the Gaussian process
* \param num_neighbors The number of neighbors used in the Vecchia approximation
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions
*/
REModelTemplate(data_size_t num_data, const gp_id_t* cluster_ids_data = nullptr, const char* re_group_data = nullptr,
data_size_t num_re_group = 0, const double* re_group_rand_coef_data = nullptr,
const int32_t* ind_effect_group_rand_coef = nullptr, data_size_t num_re_group_rand_coef = 0,
data_size_t num_gp = 0, const double* gp_coords_data = nullptr, int dim_gp_coords = 2,
const double* gp_rand_coef_data = nullptr, data_size_t num_gp_rand_coef = 0,
const char* cov_fct = nullptr, double cov_fct_shape = 0., bool vecchia_approx = false, int num_neighbors = 30,
const char* vecchia_ordering = nullptr, const char* vecchia_pred_type = nullptr, int num_neighbors_pred = 30) {
num_cov_par_ = 1;
CHECK(num_data > 0);
num_data_ = num_data;
vecchia_approx_ = vecchia_approx;
//Set up GP IDs
SetUpGPIds(num_data_, cluster_ids_data, num_data_per_cluster_, data_indices_per_cluster_, unique_clusters_, num_clusters_);
//Indices of parameters of individual components in joint parameter vector
ind_par_.push_back(0);//0+1 is starting point of parameter for first component since the first parameter is the nugget effect variance
num_comps_total_ = 0;
//Do some checks for grouped RE components and set meta data (number of components etc.)
std::vector<std::vector<string_t>> re_group_levels;//Matrix with group levels for the grouped random effects (re_group_levels[j] contains the levels for RE number j)
if (num_re_group > 0) {
if (vecchia_approx) {
Log::Fatal("The Veccia approximation cannot be used when there are grouped random effects (in the current implementation).");
}
num_re_group_ = num_re_group;
CHECK(re_group_data != nullptr);
if (num_re_group_rand_coef > 0) {
num_re_group_rand_coef_ = num_re_group_rand_coef;
CHECK(re_group_rand_coef_data != nullptr);
CHECK(ind_effect_group_rand_coef != nullptr);
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
CHECK(0 < ind_effect_group_rand_coef[j] && ind_effect_group_rand_coef[j] <= num_re_group_);
}
ind_effect_group_rand_coef_ = std::vector<int>(ind_effect_group_rand_coef, ind_effect_group_rand_coef + num_re_group_rand_coef_);
}
num_re_group_total_ = num_re_group_ + num_re_group_rand_coef_;
num_cov_par_ += num_re_group_total_;
num_comps_total_ += num_re_group_total_;
//Add indices of parameters of individual components in joint parameter vector
for (int j = 0; j < num_re_group_total_; ++j) {
ind_par_.push_back(1 + j);//end points of parameter indices of components
}
// Convert characters in 'const char* re_group_data' to matrix (num_re_group_ x num_data_) with strings of group labels
re_group_levels = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_));
if (num_re_group_ > 0) {
ConvertCharToStringGroupLevels(num_data_, num_re_group_, re_group_data, re_group_levels);
}
}
//Do some checks for GP components and set meta data (number of components etc.)
if (num_gp > 0) {
if (num_gp > 2) {
Log::Fatal("num_gp can only be either 0 or 1 in the current implementation");
}
num_gp_ = num_gp;
ind_intercept_gp_ = num_comps_total_;
CHECK(dim_gp_coords > 0);
CHECK(gp_coords_data != nullptr);
CHECK(cov_fct != nullptr);
dim_gp_coords_ = dim_gp_coords;
cov_fct_ = std::string(cov_fct);
cov_fct_shape_ = cov_fct_shape;
if (vecchia_approx) {
Log::Info("Starting nearest neighbor search for Vecchia approximation");
CHECK(num_neighbors > 0);
num_neighbors_ = num_neighbors;
CHECK(num_neighbors_pred > 0);
num_neighbors_pred_ = num_neighbors_pred;
if (vecchia_ordering == nullptr) {
vecchia_ordering_ = "none";
}
else {
vecchia_ordering_ = std::string(vecchia_ordering);
CHECK(vecchia_ordering_ == "none" || vecchia_ordering_ == "random");
}
if (vecchia_pred_type == nullptr) {
vecchia_pred_type_ = "order_obs_first_cond_obs_only";
}
else {
vecchia_pred_type_ = std::string(vecchia_pred_type);
if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
Log::Fatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_.c_str());
}
}
}
if (num_gp_rand_coef > 0) {//Random slopes
CHECK(gp_rand_coef_data != nullptr);
num_gp_rand_coef_ = num_gp_rand_coef;
}
num_gp_total_ = num_gp_ + num_gp_rand_coef_;
num_cov_par_ += (2 * num_gp_total_);
num_comps_total_ += num_gp_total_;
//Add indices of parameters of individual components in joint parameter vector
for (int j = 0; j < num_gp_total_; ++j) {
ind_par_.push_back(ind_par_.back() + 2);//end points of parameter indices of components
}
if (vecchia_approx) {
double num_mem_d = ((double)num_gp_total_) * ((double)num_data_) * ((double)num_neighbors_) * ((double)num_neighbors_);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 8000) {
Log::Warning("The current implementation of the Vecchia approximation is not optimized for memory usage. In your case (num. obs. = %d and num. neighbors = %d), at least approximately %d mb of memory is needed. If this is a problem, contact the developer of this package and ask to implement this feature.", num_data_, num_neighbors_, mem_size);
}
}
}
if (num_re_group_ > 0 && num_gp_total_ == 0) {
do_symbolic_decomposition_ = true;//Symbolic decompostion is only done if sparse matrices are used
use_woodbury_identity_ = true;//Faster to use Woodbury identity since the dimension of the random effects is typically much smaller than the number of data points
}
else {
do_symbolic_decomposition_ = false;
use_woodbury_identity_ = false;
}
//Create RE/GP component models
for (const auto& cluster_i : unique_clusters_) {
std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i;
if (vecchia_approx_) {
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
CreateREComponentsVecchia(num_data_, data_indices_per_cluster_, cluster_i, num_data_per_cluster_,
gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i, vecchia_ordering_, num_neighbors_);
nearest_neighbors_.insert({ cluster_i, nearest_neighbors_cluster_i });
dist_obs_neighbors_.insert({ cluster_i, dist_obs_neighbors_cluster_i });
dist_between_neighbors_.insert({ cluster_i, dist_between_neighbors_cluster_i });
entries_init_B_.insert({ cluster_i, entries_init_B_cluster_i });
entries_init_B_grad_.insert({ cluster_i, entries_init_B_grad_cluster_i });
z_outer_z_obs_neighbors_.insert({ cluster_i, z_outer_z_obs_neighbors_cluster_i });
Log::Info("Nearest neighbors for Vecchia approximation found");
}//end vecchia_approx_
else {
CreateREComponents(num_data_, num_re_group_, data_indices_per_cluster_, cluster_i, re_group_levels, num_data_per_cluster_,
num_re_group_rand_coef_, re_group_rand_coef_data, ind_effect_group_rand_coef_, num_gp_, gp_coords_data,
dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, re_comps_cluster_i);
if (use_woodbury_identity_) {//Create matrices Z and ZtZ if Woodbury identity is used (used only if there are only grouped REs and no GPs)
CHECK(num_comps_total_ == num_re_group_total_);
std::vector<data_size_t> cum_num_rand_eff_cluster_i(num_comps_total_+1);
cum_num_rand_eff_cluster_i[0] = 0;
//Determine number of rows and non-zero entries of Z
int non_zeros = 0;
int ncols = 0;
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_cluster_i[j]->GetZ();
ncols += (int)Z_j->cols();
non_zeros += (int)Z_j->nonZeros();
cum_num_rand_eff_cluster_i[j + 1] = ncols;
}
//Create matrix Z
std::vector<Triplet_t> triplets;
triplets.reserve(non_zeros);
int ncol_prev = 0;
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_cluster_i[j]->GetZ();
for (int k = 0; k < Z_j->outerSize(); ++k) {
for (sp_mat_t::InnerIterator it(*Z_j, k); it; ++it) {
triplets.emplace_back(it.row(), ncol_prev + it.col(), it.value());
}
}
ncol_prev += (int)Z_j->cols();
}
sp_mat_t Z_cluster_i(num_data_per_cluster_[cluster_i], ncols);
Z_cluster_i.setFromTriplets(triplets.begin(), triplets.end());
sp_mat_t Zt_cluster_i = Z_cluster_i.transpose();
sp_mat_t ZtZ_cluster_i = Zt_cluster_i * Z_cluster_i;
Zt_.insert({ cluster_i, Zt_cluster_i });
ZtZ_.insert({ cluster_i, ZtZ_cluster_i });
cum_num_rand_eff_.insert({ cluster_i, cum_num_rand_eff_cluster_i });
//for (int i = 0; i < (int)Z_cluster_i.rows(); ++i) {//For debugging only
// for (int j = 0; j < (int)Z_cluster_i.cols(); ++j) {
// Log::Info("Z(%d,%d) %f", i, j, Z_cluster_i.coeffRef(i, j));
// }
//}
//for (int i = 0; i < (int)ZtZ_cluster_i.rows(); ++i) {//For debugging only
// for (int j = 0; j < (int)ZtZ_cluster_i.cols(); ++j) {
// Log::Info("ZtZ(%d,%d) %f", i, j, ZtZ_cluster_i.coeffRef(i, j));
// }
//}
}//end use_woodbury_identity_
ConstructI<T1>(cluster_i);//Idendity matrices needed for computing inverses of covariance matrices used in gradient descent
}//end not vecchia_approx_
re_comps_.insert({ cluster_i, re_comps_cluster_i });
}
////Following only prints stuff for debugging
//Log::Info("********************** Meta data ********************************");
//Log::Info("num_data_ : %d", num_data_);
//Log::Info("num_clusters_ : %d", num_clusters_);
//Log::Info("num_re_group_ : %d", num_re_group_);
//Log::Info("num_re_group_rand_coef_ : %d", num_re_group_rand_coef_);
//Log::Info("num_re_group_total_ : %d", num_re_group_total_);
//Log::Info("num_gp_rand_coef_ : %d", num_gp_rand_coef_);
//Log::Info("num_gp_total_ : %d", num_gp_total_);
//Log::Info("num_cov_par_: %d", num_cov_par_);
//for (unsigned i = 0; i < ind_par_.size(); i++) { Log::Info("ind_par_[%d]: %d", i, ind_par_[i]); }
//Log::Info("******************************************************");
//int ii = 0;
//for (const auto& cluster_i : unique_clusters_) {
// Log::Info("unique_clusters_[%d]: %d", ii, cluster_i);
// Log::Info("num_data_per_cluster_[%d]: %d", cluster_i, num_data_per_cluster_[cluster_i]);
// //for (int j = 0; j < std::min((int)data_indices_per_cluster_[cluster_i].size(), 10); ++j) { Log::Info("data_indices_per_cluster_[%d][%d]: %d", cluster_i, j, data_indices_per_cluster_[cluster_i][j]); }
// if (num_re_group_ > 0) {
// Log::Info("*********************** Grouped REs *******************************");
// //Log::Info("re_comps_[cluster_i] %s ", typeid(re_comps_[cluster_i]).name());
// //Log::Info("re_comps_[cluster_i].size(): %d", re_comps_[cluster_i].size());
// //for (const auto& re_comp : re_comps_[cluster_i]) {
// for (int j = 0; j < re_comps_[cluster_i].size(); ++j) {
// std::shared_ptr<RECompGroup<T1>> re_comp_group = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][j]);
// //for (const auto& el : re_comp_group->group_data_) { Log::Info("re_comps_[%d][j].group_data_[i]: %d", cluster_i, el); }
// if (!re_comp_group->is_rand_coef_) {
// for (int i = 0; i < std::min((int)(*re_comp_group->group_data_).size(), 10); i++) { Log::Info("re_comps_[%d][%d].group_data_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); }
// }
// else if (re_comp_group->is_rand_coef_) {
// for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::Info("re_comps_[%d][%d].group_data_ref_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); }
// for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::Info("re_comps_[%d][%d].rand_coef_data_[%d]: %f", cluster_i, j, i, re_comp_group->rand_coef_data_[i]); }
// }
// }
// }
// ii++;
//}
}
	/*! \brief Destructor (empty body: all members release their own resources) */
	~REModelTemplate() {
	}
	/*! \brief Disable copy assignment */
REModelTemplate& operator=(const REModelTemplate&) = delete;
	/*! \brief Disable copy construction */
REModelTemplate(const REModelTemplate&) = delete;
/*!
	* \brief Find parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
* Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters)
* \param y_data Response variable data
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] num_it Number of iterations
* \param lr Learning rate
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
	* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
* \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value
* \param optimizer Options: "gradient_descent" or "fisher_scoring"
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
* \param cov_pars_lag_1 Covariance parameters from previous iteration used for Nesterov step (on transformed scale). Default = nullptr
*/
void OptimCovPar(const double* y_data, double* init_cov_pars, double* optim_cov_pars,
int& num_it, double lr = 0.01, double acc_rate_cov = 0.5, int momentum_offset = 2,
int max_iter = 1000, double delta_rel_conv = 1.0e-6, string_t optimizer = "fisher_scoring",
bool use_nesterov_acc = true, int nesterov_schedule_version = 0,
double* std_dev_cov_par = nullptr, bool calc_std_dev = false, double* cov_pars_lag_1 = nullptr) {
if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer) == SUPPORTED_OPTIM_COV_PAR_.end()) {
Log::Fatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer.c_str());
}
SetY(y_data);
vec_t cov_pars = Eigen::Map<vec_t>(init_cov_pars, num_cov_par_);
vec_t cov_pars_lag1 = (cov_pars_lag_1 == nullptr) ? cov_pars : cov_pars_lag1;
num_it = max_iter;
Log::Debug("Initial covariance parameters");
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); }
for (int it = 0; it < max_iter; ++it) {
ApplyMomentumStep(it, cov_pars, cov_pars_lag1, use_nesterov_acc, acc_rate_cov, nesterov_schedule_version, true, momentum_offset);
SetCovParsComps(cov_pars);
CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used)
CalcYAux();
if (optimizer == "gradient_descent") {//gradient descent
UpdateCovParGradOneIter(lr, cov_pars, true);//closed_form_solution_sigma = true: we profile out sigma (=use closed for expression for error / nugget variance) since this is better for gradient descent (the paremeters usually live on different scales and the nugget needs a small learning rate but the others not...)
}
else if (optimizer == "fisher_scoring") {//Fisher scoring
UpdateCovParFisherScoringOneIter(cov_pars, false);//closed_form_solution_sigma = false: we don't profile out sigma (=don't use closed for expression for error / nugget variance) since this is better for Fisher scoring (otherwise much more iterations are needed)
}
CheckNaNInf(cov_pars);
if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
Log::Debug("Covariance parameter estimation: iteration number %d", it + 1);
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); }
}
if ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv) {
num_it = it + 1;
break;
}
}
if (num_it == max_iter) {
Log::Warning("Covariance parameter estimation: no convergence after the maximal number of iterations. If this is a problem, you might consider increasing the number of iterations or using a different learning rate.");
}
for (int i = 0; i < num_cov_par_; ++i) {
optim_cov_pars[i] = cov_pars[i];
}
if (calc_std_dev) {
vec_t std_dev_cov(num_cov_par_);
CalcStdDevCovPar(cov_pars, std_dev_cov);
for (int i = 0; i < num_cov_par_; ++i) {
std_dev_cov_par[i] = std_dev_cov[i];
}
}
has_covariates_ = false;
}
/*!
	* \brief Find linear regression coefficients and covariance parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
* Note: You should pre-allocate memory for optim_cov_pars and optim_coef. Their length equal the number of covariance parameters and the number of regression coefficients
* If calc_std_dev=true, you also need to pre-allocate memory for std_dev_cov_par and std_dev_coef of the same length for the standard deviations
* \param y_data Response variable data
* \param covariate_data Covariate data (=independent variables, features)
* \param num_covariates Number of covariates
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] optim_coef Optimal regression coefficients
* \param[out] num_it Number of iterations
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param init_coef Initial values for the regression coefficients
* \param lr_coef Learning rate for fixed-effect linear coefficients
* \param lr_cov Learning rate for covariance parameters
* \param acc_rate_coef Acceleration rate for coefficients for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
	* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
	* \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param optimizer_cov Optimizer for covariance parameters. Options: "gradient_descent" or "fisher_scoring"
* \param optimizer_coef Optimizer for coefficients. Options: "gradient_descent" or "wls" (coordinate descent using weighted least squares)
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param[out] std_dev_coef Standard deviations for the coefficients
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
*/
	void OptimLinRegrCoefCovPar(const double* y_data, const double* covariate_data, int num_covariates,
		double* optim_cov_pars, double* optim_coef, int& num_it, double* init_cov_pars, double* init_coef = nullptr,
		double lr_coef = 0.01, double lr_cov = 0.01, double acc_rate_coef = 0.1, double acc_rate_cov = 0.5, int momentum_offset = 2,
		int max_iter = 1000, double delta_rel_conv = 1.0e-6, bool use_nesterov_acc = true, int nesterov_schedule_version = 0,
		string_t optimizer_cov = "fisher_scoring", string_t optimizer_coef = "wls", double* std_dev_cov_par = nullptr,
		double* std_dev_coef = nullptr, bool calc_std_dev = false) {
		//Validate optimizer choices before doing any work
		if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer_cov) == SUPPORTED_OPTIM_COV_PAR_.end()) {
			Log::Fatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer_cov.c_str());
		}
		if (SUPPORTED_OPTIM_COEF_.find(optimizer_coef) == SUPPORTED_OPTIM_COEF_.end()) {
			Log::Fatal("Optimizer option '%s' is not supported for regression coefficients.", optimizer_coef.c_str());
		}
		CHECK(covariate_data != nullptr);
		has_covariates_ = true;
		num_coef_ = num_covariates;
		X_ = Eigen::Map<const den_mat_t>(covariate_data, num_data_, num_coef_);
		//Check whether one of the columns contains only 1's and if not, give out warning
		vec_t vec_ones(num_data_);
		vec_ones.setOnes();
		bool has_intercept = false;
		for (int icol = 0; icol < num_coef_; ++icol) {
			if ((X_.col(icol) - vec_ones).cwiseAbs().sum() < 0.001) {
				has_intercept = true;
				break;
			}
		}
		if (!has_intercept) {
			Log::Warning("The covariate data contains no column of ones. This means that there is no intercept included.");
		}
		y_vec_ = Eigen::Map<const vec_t>(y_data, num_data_);
		vec_t cov_pars = Eigen::Map<const vec_t>(init_cov_pars, num_cov_par_);
		vec_t cov_pars_lag1 = cov_pars;
		//Regression coefficients: start at zero unless initial values are provided
		vec_t beta(num_covariates);
		if (init_coef == nullptr) {
			beta.setZero();
		}
		else {
			beta = Eigen::Map<const vec_t>(init_coef, num_covariates);
		}
		vec_t beta_lag1 = beta;
		vec_t resid;
		num_it = max_iter;
		//Alternate between one update of the coefficients and one update of the covariance parameters per iteration
		for (int it = 0; it < max_iter; ++it) {
			if (it > 0) {
				//NOTE(review): ApplyMomentumStep appears to also update the *_lag1 vectors to the previous parameters — confirm
				ApplyMomentumStep(it, cov_pars, cov_pars_lag1, use_nesterov_acc, acc_rate_cov, nesterov_schedule_version, true, momentum_offset);
				if (optimizer_coef == "gradient_descent") {
					ApplyMomentumStep(it, beta, beta_lag1, use_nesterov_acc, acc_rate_coef, nesterov_schedule_version, false, momentum_offset);
				}
			}
			SetCovParsComps(cov_pars);
			CalcCovFactor(vecchia_approx_, true, 1., false);
			//Update linear regression coefficients
			if (optimizer_coef == "gradient_descent") {//one step of gradient descent
				resid = y_vec_ - (X_ * beta);
				SetY(resid.data());
				CalcYAux();
				UpdateCoefGradOneIter(lr_coef, cov_pars[0], X_, beta);
			}
			else if (optimizer_coef == "wls") {//coordinate descent using generalized least squares
				SetY(y_vec_.data());
				CalcYAux();
				beta_lag1 = beta;//for "wls", the lag value is kept explicitly here (not via ApplyMomentumStep)
				UpdateCoefGLS(X_, beta);
			}
			//Update covariance parameters (on the residuals of the current coefficient estimate)
			resid = y_vec_ - (X_ * beta);
			SetY(resid.data());
			CalcYAux();
			if (optimizer_cov == "gradient_descent") {//one step of gradient descent
				UpdateCovParGradOneIter(lr_cov, cov_pars, true);//closed_form_solution_sigma = true: we profile out sigma (=use closed-form expression for error / nugget variance) since this is better for gradient descent (the parameters usually live on different scales and the nugget needs a small learning rate but the others not...)
			}
			else if (optimizer_cov == "fisher_scoring") {//one step of Fisher scoring
				UpdateCovParFisherScoringOneIter(cov_pars, false);//closed_form_solution_sigma = false: we don't profile out sigma (=don't use closed-form expression for error / nugget variance) since this is better for Fisher scoring (otherwise much more iterations are needed)
			}
			CheckNaNInf(cov_pars);
			//Log progress with decreasing frequency as the iteration count grows
			if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
				Log::Debug("Gradient descent iteration number %d", it + 1);
				for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); }
				for (int i = 0; i < std::min((int)beta.size(), 3); ++i) { Log::Debug("beta[%d]: %f", i, beta[i]); }
			}
			//Converged only when BOTH coefficients and covariance parameters have a small relative change
			if (((beta - beta_lag1).norm() / beta_lag1.norm() < delta_rel_conv) && ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv)) {
				num_it = it + 1;
				break;
			}
		}
		if (num_it == max_iter) {
			Log::Warning("Covariance parameter estimation: no convergence after the maximal number of iterations");
		}
		//Write optimal covariance parameters back to the caller-provided buffer
		for (int i = 0; i < num_cov_par_; ++i) {
			optim_cov_pars[i] = cov_pars[i];
		}
		if (calc_std_dev) {
			//Asymptotic standard deviations for the covariance parameters (inverse Fisher information)
			vec_t std_dev_cov(num_cov_par_);
			CalcStdDevCovPar(cov_pars, std_dev_cov);
			for (int i = 0; i < num_cov_par_; ++i) {
				std_dev_cov_par[i] = std_dev_cov[i];
			}
		}
		//Write optimal regression coefficients back to the caller-provided buffer
		for (int i = 0; i < num_covariates; ++i) {
			optim_coef[i] = beta[i];
		}
		if (calc_std_dev) {
			//Standard deviations for the coefficients
			vec_t std_dev_beta(num_covariates);
			CalcStdDevCoef(cov_pars, X_, std_dev_beta);
			for (int i = 0; i < num_covariates; ++i) {
				std_dev_coef[i] = std_dev_beta[i];
			}
		}
	}
/*!
* \brief Calculate the value of the negative log-likelihood
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Negative log-likelihood
*/
void EvalNegLogLikelihood(const double* y_data, double* cov_pars, double& negll) {
negll = 0.;
SetY(y_data);
vec_t cov_pars_vec = Eigen::Map<vec_t>(cov_pars, num_cov_par_);
SetCovParsComps(cov_pars_vec);
CalcCovFactor(false, true, 1., false);//Create covariance matrix and factorize it
//Calculate quadratic form
double yTPsiInvy = 0.;
CalcYTPsiIInvY<T1>(yTPsiInvy);
//Calculate log determinant
double log_det = 0;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
log_det -= D_inv_[cluster_i].diagonal().array().log().sum();
}
else {
if (use_woodbury_identity_) {
log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
for (int j = 0; j < num_comps_total_; ++j) {
int num_rand_eff = cum_num_rand_eff_[cluster_i][j + 1] - cum_num_rand_eff_[cluster_i][j];
log_det += (num_rand_eff * std::log(re_comps_[cluster_i][j]->cov_pars_[0]));
}
}
else {
log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
}
}
}
negll = yTPsiInvy / 2. / cov_pars[0] + log_det / 2. + num_data_ / 2. * (std::log(cov_pars[0]) + std::log(2 * M_PI));
}
/*!
* \brief Set the data used for making predictions (useful if the same data is used repeatedly, e.g., in validation of GPBoost)
* \param num_data_pred Number of data points for which predictions are made
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
*/
void SetPredictionData(int num_data_pred,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, const double* covariate_data_pred = nullptr) {
if (cluster_ids_data_pred == nullptr) {
cluster_ids_data_pred_.clear();
}
else {
cluster_ids_data_pred_ = std::vector<gp_id_t>(cluster_ids_data_pred, cluster_ids_data_pred + num_data_pred);
}
if (re_group_data_pred == nullptr) {
re_group_levels_pred_.clear();
if (num_re_group_ > 0) {
Log::Fatal("No group data is provided for making predictions");
}
}
else {
//For grouped random effecst: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred_ = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred_);
}
if (re_group_rand_coef_data_pred == nullptr) {
re_group_rand_coef_data_pred_.clear();
}
else {
re_group_rand_coef_data_pred_ = std::vector<double>(re_group_rand_coef_data_pred, re_group_rand_coef_data_pred + num_data_pred * num_re_group_rand_coef_);
}
if (gp_coords_data_pred == nullptr) {
gp_coords_data_pred_.clear();
}
else {
gp_coords_data_pred_ = std::vector<double>(gp_coords_data_pred, gp_coords_data_pred + num_data_pred * dim_gp_coords_);
}
if (gp_rand_coef_data_pred == nullptr) {
gp_rand_coef_data_pred_.clear();
}
else {
gp_rand_coef_data_pred_ = std::vector<double>(gp_rand_coef_data_pred, gp_rand_coef_data_pred + num_data_pred * num_gp_rand_coef_);
}
if (covariate_data_pred == nullptr) {
covariate_data_pred_.clear();
}
else {
covariate_data_pred_ = std::vector<double>(covariate_data_pred, covariate_data_pred + num_data_pred * num_coef_);
}
}
/*!
* \brief Make predictions: calculate conditional mean and covariance matrix
* Note: You should pre-allocate memory for out_predict
* Its length is equal to num_data_pred if only the conditional mean is predicted (predict_cov_mat=false)
* or num_data_pred * (1 + num_data_pred) if both the conditional mean and covariance matrix are predicted (predict_cov_mat=true)
* \param cov_pars_pred Covariance parameters of components
* \param y_obs Response variable for observed data
* \param num_data_pred Number of data points for which predictions are made
	* \param[out] out_predict Conditional mean at prediction points (="predicted value") followed by (if predict_cov_mat=true) the conditional covariance matrix at in column-major format
* \param predict_cov_mat If true, the conditional covariance matrix is calculated (default=false)
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
* \param coef_pred Coefficients for linear covariates
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param use_saved_data If true, saved data is used and some arguments are ignored
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions (-1 means that the value already set at initialization is used)
*/
void Predict(const double* cov_pars_pred, const double* y_obs, data_size_t num_data_pred,
double* out_predict, bool predict_cov_mat = false,
const double* covariate_data_pred = nullptr, const double* coef_pred = nullptr,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, bool use_saved_data = false,
const char* vecchia_pred_type = nullptr, int num_neighbors_pred = -1) {
//Should previously set data be used?
std::vector<std::vector<string_t>> re_group_levels_pred;//Matrix with group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
if (use_saved_data) {
re_group_levels_pred = re_group_levels_pred_;
if (cluster_ids_data_pred_.empty()) {
cluster_ids_data_pred = nullptr;
}
else {
cluster_ids_data_pred = cluster_ids_data_pred_.data();
}
if (re_group_rand_coef_data_pred_.empty()) {
re_group_rand_coef_data_pred = nullptr;
}
else {
re_group_rand_coef_data_pred = re_group_rand_coef_data_pred_.data();
}
if (gp_coords_data_pred_.empty()) {
gp_coords_data_pred = nullptr;
}
else {
gp_coords_data_pred = gp_coords_data_pred_.data();
}
if (gp_rand_coef_data_pred_.empty()) {
gp_rand_coef_data_pred = nullptr;
}
else {
gp_rand_coef_data_pred = gp_rand_coef_data_pred_.data();
}
if (covariate_data_pred_.empty()) {
covariate_data_pred = nullptr;
}
else {
covariate_data_pred = covariate_data_pred_.data();
}
}
else {
if (num_re_group_ > 0) {
if (re_group_data_pred == nullptr) {
Log::Fatal("No group data is provided for making predictions");
}
else {
//For grouped random effecst: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred);
}
}
}
//Some checks
CHECK(num_data_pred > 0);
if (has_covariates_) {
CHECK(covariate_data_pred != nullptr);
CHECK(coef_pred != nullptr);
}
if (y_obs == nullptr) {
if (y_.empty()) {
Log::Fatal("Observed data is not provided and has not been set before");
}
}
//Check whether some data is missing
if (re_group_rand_coef_data_pred == nullptr && num_re_group_rand_coef_ > 0) {
Log::Fatal("No covariate data for grouped random coefficients is provided for making predictions");
}
if (gp_coords_data_pred == nullptr && num_gp_ > 0) {
Log::Warning("No coordinate data for the Gaussian process is provided for making predictions");
}
if (gp_rand_coef_data_pred == nullptr && num_gp_rand_coef_ > 0) {
Log::Warning("No covariate data for Gaussian process random coefficients is provided for making predictions");
}
if (num_data_pred > 10000 && predict_cov_mat) {
double num_mem_d = ((double)num_data_pred) * ((double)num_data_pred);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
Log::Warning("The covariance matrix can be very large for large sample sizes which might lead to memory limitations. In your case (n = %d), the covariance needs at least approximately %d mb of memory. If you only need variances or covariances for linear combinations, contact the developer of this package and ask to implement this feature.", num_data_pred, mem_size);
}
if (vecchia_approx_) {
if (vecchia_pred_type != nullptr) {
string_t vecchia_pred_type_S = std::string(vecchia_pred_type);
CHECK(vecchia_pred_type_S == "order_obs_first_cond_obs_only" ||
vecchia_pred_type_S == "order_obs_first_cond_all" ||
vecchia_pred_type_S == "order_pred_first" ||
vecchia_pred_type_S == "latent_order_obs_first_cond_obs_only" ||
vecchia_pred_type_S == "latent_order_obs_first_cond_all");
vecchia_pred_type_ = vecchia_pred_type_S;
}
if (num_neighbors_pred > 0) {
num_neighbors_pred_ = num_neighbors_pred;
}
}
vec_t coef;
if (has_covariates_) {
coef = Eigen::Map<const vec_t>(coef_pred, num_coef_);
den_mat_t X_pred = Eigen::Map<const den_mat_t>(covariate_data_pred, num_data_pred, num_coef_);
vec_t mu = X_pred * coef;
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred; ++i) {
out_predict[i] = mu[i];
}
}
vec_t cov_pars = Eigen::Map<const vec_t>(cov_pars_pred, num_cov_par_);
//Set up cluster IDs
std::map<gp_id_t, int> num_data_per_cluster_pred;
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_pred;
std::vector<gp_id_t> unique_clusters_pred;
data_size_t num_clusters_pred;
SetUpGPIds(num_data_pred, cluster_ids_data_pred, num_data_per_cluster_pred,
data_indices_per_cluster_pred, unique_clusters_pred, num_clusters_pred);
//Check whether predictions are made for existing clusters or if only for new independet clusters predictions are made
bool pred_for_observed_data = false;
for (const auto& cluster_i : unique_clusters_pred) {
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) != unique_clusters_.end()) {
pred_for_observed_data = true;
break;
}
}
//Factorize covariance matrix and calculate Psi^{-1}y_obs (if required for prediction)
if (pred_for_observed_data) {//TODO: this acutally needs to be done only for the GP realizations for which predictions are made (currently it is done for all of them in unique_clusters_pred)
if (has_covariates_) {
vec_t resid;
if (y_obs != nullptr) {
vec_t y = Eigen::Map<const vec_t>(y_obs, num_data_);
resid = y - (X_ * coef);
}
else {
resid = y_vec_ - (X_ * coef);
}
SetY(resid.data());
}
else {
if (y_obs != nullptr) {
SetY(y_obs);
}
}
SetCovParsComps(cov_pars);
if (!vecchia_approx_) {
CalcCovFactor(false, true, 1., false);//no need to do this for the Vecchia approximation, is done in the prediction steps
CalcYAux();
}
}//end if(pred_for_observed_data)
//Initialize covariance matrix
if (predict_cov_mat) {//TODO: avoid unnecessary initialization (only set to 0 for covariances accross different realizations of GPs)
#pragma omp parallel for schedule(static)
for (int i = 0; i < (num_data_pred * num_data_pred); ++i) {
out_predict[i + num_data_pred] = 0.;
}
}
for (const auto& cluster_i : unique_clusters_pred) {
//Case 1: no data observed for this Gaussian process with ID 'cluster_i'. Thus use prior mean (0) and prior covariance matrix
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) == unique_clusters_.end()) {
if (!has_covariates_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = 0.;
}
}
if (predict_cov_mat) {
T1 psi;
std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i;
if (vecchia_approx_) {
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
CreateREComponentsVecchia(num_data_pred, data_indices_per_cluster_pred, cluster_i, num_data_per_cluster_pred,
gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i, "none", num_neighbors_pred_);//TODO: maybe also use ordering for making predictions? (need to check that there are not errors)
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
}
sp_mat_t B_cluster_i;
sp_mat_t D_inv_cluster_i;
std::vector<sp_mat_t> B_grad_cluster_i;//not used, but needs to be passed to function
std::vector<sp_mat_t> D_grad_cluster_i;//not used, but needs to be passed to function
CalcCovFactorVecchia(num_data_per_cluster_pred[cluster_i], false, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i,
B_cluster_i, D_inv_cluster_i, B_grad_cluster_i, D_grad_cluster_i);
//Calculate Psi
sp_mat_t D_sqrt(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
D_sqrt.setIdentity();
D_sqrt.diagonal().array() = D_inv_cluster_i.diagonal().array().pow(-0.5);
sp_mat_t B_inv_D_sqrt;
eigen_sp_Lower_sp_RHS_cs_solve(B_cluster_i, D_sqrt, B_inv_D_sqrt, true);
psi = B_inv_D_sqrt * B_inv_D_sqrt.transpose();
}//end vecchia_approx_
else {//not vecchia_approx_
psi.resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
psi.setIdentity();
CreateREComponents(num_data_pred, num_re_group_, data_indices_per_cluster_pred, cluster_i, re_group_levels_pred, num_data_per_cluster_pred,
num_re_group_rand_coef_, re_group_rand_coef_data_pred, ind_effect_group_rand_coef_, num_gp_, gp_coords_data_pred,
dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, re_comps_cluster_i);
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
re_comps_cluster_i[j]->CalcSigma();
psi += (*(re_comps_cluster_i[j]->GetZSigmaZt().get()));
}
}//end not vecchia_approx_
psi *= cov_pars[0];
//write on output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = psi.coeff(j, i);
}
}
}//end predict_cov_mat
}//end cluster_i with no observed data
else {//Case 2: there exists observed data for this cluster_i (= typically the case)
den_mat_t gp_coords_mat_pred;
if (num_gp_ > 0) {
std::vector<double> gp_coords_pred;
for (int j = 0; j < dim_gp_coords_; ++j) {
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
gp_coords_pred.push_back(gp_coords_data_pred[j * num_data_pred + id]);
}
}
gp_coords_mat_pred = Eigen::Map<den_mat_t>(gp_coords_pred.data(), num_data_per_cluster_pred[cluster_i], dim_gp_coords_);
}
vec_t mean_pred_id(num_data_per_cluster_pred[cluster_i]);
T1 cov_mat_pred_id;
if (predict_cov_mat) {
cov_mat_pred_id = T1(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
}
if (vecchia_approx_) {//vecchia_approx_
std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][ind_intercept_gp_]);
int num_data_tot = num_data_per_cluster_[cluster_i] + num_data_per_cluster_pred[cluster_i];
double num_mem_d = ((double)num_neighbors_pred_) * ((double)num_neighbors_pred_) * (double)(num_data_tot)+(double)(num_neighbors_pred_) * (double)(num_data_tot);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 4000) {
Log::Warning("The current implementation of the Vecchia approximation needs a lot of memory if the number of neighbors is large. In your case (nb. of neighbors = %d, nb. of observations = %d, nb. of predictions = %d), this needs at least approximately %d mb of memory. If this is a problem for you, contact the developer of this package and ask to change this.", num_neighbors_pred_, num_data_per_cluster_[cluster_i], num_data_per_cluster_pred[cluster_i], mem_size);
}
if (vecchia_pred_type_ == "order_obs_first_cond_obs_only") {
CalcPredVecchiaObservedFirstOrder(true, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_obs_first_cond_all") {
CalcPredVecchiaObservedFirstOrder(false, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_pred_first") {
CalcPredVecchiaPredictedFirstOrder(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_obs_only") {
CalcPredVecchiaLatentObservedFirstOrder(true, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_all") {
CalcPredVecchiaLatentObservedFirstOrder(false, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
}//end vecchia_approx_
else {// not vecchia_approx_
CalcPred(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_group_levels_pred, re_group_rand_coef_data_pred, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}//end not vecchia_approx_
//write on output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
if (has_covariates_) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] += mean_pred_id[i];
}
else {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i];
}
}
if (predict_cov_mat) {
cov_mat_pred_id *= cov_pars[0];
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = cov_mat_pred_id.coeff(j, i);//cov_mat_pred_id_den(j, i);
}
}
}
}//end cluster_i with data
}//end loop over cluster
}
/*!
* \brief Find "reasonable" default values for the initial values of the covariance parameters (on transformed scale)
* Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters)
* \param y_data Response variable data
* \param[out] init_cov_pars Initial values for covariance parameters of RE components
*/
void FindInitCovPar(const double* y_data, double* init_cov_pars) {
	//Use the sample mean / variance of the response variable to initialize the error variance (nugget effect)
	double mean = 0;
	for (int i = 0; i < num_data_; ++i) {
		mean += y_data[i];
	}
	mean /= num_data_;
	double var = 0;
	for (int i = 0; i < num_data_; ++i) {
		var += (y_data[i] - mean) * (y_data[i] - mean);
	}
	//Unbiased sample variance; guard against division by zero when there is only one data point
	var /= (num_data_ > 1) ? (num_data_ - 1) : 1;
	init_cov_pars[0] = var;
	//Delegate the initial values for the remaining parameters to the individual RE components.
	//The first cluster is used as representative; ind_par_ gives the parameter offsets per component.
	int ind_par = 1;
	for (int j = 0; j < num_comps_total_; ++j) {
		int num_par_j = ind_par_[j + 1] - ind_par_[j];
		vec_t pars = vec_t(num_par_j);
		re_comps_[unique_clusters_[0]][j]->FindInitCovPar(pars);
		for (int jj = 0; jj < num_par_j; ++jj) {
			init_cov_pars[ind_par] = pars[jj];
			ind_par++;
		}
	}
}
/*! \brief Returns the total number of covariance parameters */
int num_cov_par() {
	return num_cov_par_;
}
/*!
* \brief Calculate the leaf values when performing a Newton update step after the tree structure has been found in tree-boosting
* Note: only used in GPBoost for combined Gaussian process tree-boosting (this is called from 'objective_function_->NewtonUpdateLeafValues'). It is assumed that 'CalcYAux' has been called before (from 'objective_function_->GetGradients').
* \param data_leaf_index Leaf index for every data point (array of size num_data)
* \param num_leaves Number of leaves
* \param[out] leaf_values Leaf values when performing a Newton update step (array of size num_leaves)
* \param marg_variance The marginal variance. Default = 1. Can be used to multiply values by it since Newton updates do not depend on it but 'CalcYAux' might have been called using marg_variance!=1.
*/
void NewtonUpdateLeafValues(const int* data_leaf_index,
	const int num_leaves, double* leaf_values, double marg_variance = 1.) {
	CHECK(y_aux_has_been_calculated_);//y_aux_ has already been calculated when calculating the gradient for finding the tree structure from 'GetGradients' in 'regression_objective.hpp'
	//Accumulators over all independent clusters: H^T * Psi^-1 * H and H^T * Psi^-1 * y
	den_mat_t HTPsiInvH(num_leaves, num_leaves);
	vec_t HTYAux(num_leaves);
	HTPsiInvH.setZero();
	HTYAux.setZero();
	for (const auto& cluster_i : unique_clusters_) {
		//Entries for matrix H_cluster_i = incidence matrix H that relates tree leaves to observations for cluster_i
		//(one entry of value 1. per observation, in the column of that observation's leaf)
		std::vector<Triplet_t> entries_H_cluster_i(num_data_per_cluster_[cluster_i]);
#pragma omp parallel for schedule(static)
		for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
			entries_H_cluster_i[i] = Triplet_t(i, data_leaf_index[data_indices_per_cluster_[cluster_i][i]], 1.);
		}
		den_mat_t HTPsiInvH_cluster_i;
		if (vecchia_approx_) {
			//Vecchia approximation: Psi^-1 = B^T * D^-1 * B, so H^T Psi^-1 H = (BH)^T D^-1 (BH)
			sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);//row major format is needed for Vecchia approx.
			H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
			HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
			sp_mat_t BH = B_[cluster_i] * H_cluster_i;
			HTPsiInvH_cluster_i = den_mat_t(BH.transpose() * D_inv_[cluster_i] * BH);
		}
		else {
			sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);
			H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
			HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
			if (use_woodbury_identity_) {
				//Woodbury identity: H^T Psi^-1 H = H^T H - (M^-0.5 Z^T H)^T (M^-0.5 Z^T H)
				sp_mat_t ZtH_cluster_i = Zt_[cluster_i] * H_cluster_i;
				T1 MInvSqrtZtH;
				CalcPsiInvSqrtH(MInvSqrtZtH, ZtH_cluster_i, cluster_i);
				HTPsiInvH_cluster_i = H_cluster_i.transpose() * H_cluster_i - MInvSqrtZtH.transpose() * MInvSqrtZtH;
			}
			else {
				//Generic case: H^T Psi^-1 H = (Psi^-0.5 H)^T (Psi^-0.5 H)
				T1 PsiInvSqrtH;
				CalcPsiInvSqrtH(PsiInvSqrtH, H_cluster_i, cluster_i);
				HTPsiInvH_cluster_i = PsiInvSqrtH.transpose() * PsiInvSqrtH;
			}
		}
		HTPsiInvH += HTPsiInvH_cluster_i;
	}
	//Rescale since 'CalcYAux' might have been called with marg_variance != 1 (Newton updates themselves do not depend on it)
	HTYAux *= marg_variance;
	//Newton step: leaf values solve (H^T Psi^-1 H) * v = H^T Psi^-1 y
	vec_t new_leaf_values = HTPsiInvH.llt().solve(HTYAux);
	for (int i = 0; i < num_leaves; ++i) {
		leaf_values[i] = new_leaf_values[i];
	}
}
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points */
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_;
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization */
std::map<gp_id_t, int> num_data_per_cluster_;
/*! \brief Number of independent realizations of the REs/GPs */
data_size_t num_clusters_;
/*! \brief Unique labels of independent realizations */
std::vector<gp_id_t> unique_clusters_;
/*! \brief Number of grouped (intercept) random effects */
data_size_t num_re_group_ = 0;
/*! \brief Number of grouped random coefficients */
data_size_t num_re_group_rand_coef_ = 0;
/*! \brief Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting starts at 1 (and ends at the number of base intercept random effects). Length of vector = num_re_group_rand_coef_. */
std::vector<int> ind_effect_group_rand_coef_;
/*! \brief Total number of grouped random effects (random intercepts plus random coefficients (slopes)) */
data_size_t num_re_group_total_ = 0;
/*! \brief 1 if there is a Gaussian process 0 otherwise */
data_size_t num_gp_ = 0;
/*! \brief Type of GP. 0 = classical (spatial) GP, 1 = spatio-temporal GP */ //TODO: remove?
int8_t GP_type_ = 0;
/*! \brief Number of random coefficient GPs */
data_size_t num_gp_rand_coef_ = 0;
/*! \brief Total number of GPs (random intercepts plus random coefficients) */
data_size_t num_gp_total_ = 0;
/*! \brief Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs */
int ind_intercept_gp_;
/*! \brief Dimension of the coordinates (=number of features) for Gaussian process */
int dim_gp_coords_ = 2;//required to save since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Type of covariance(kernel) function for Gaussian processes */
string_t cov_fct_ = "exponential";//required to also save here since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */
double cov_fct_shape_ = 0.;
/*! \brief Keys: labels of independent realizations of REs/GPs, values: vectors with individual RE/GP components */
std::map<gp_id_t, std::vector<std::shared_ptr<RECompBase<T1>>>> re_comps_;
/*! \brief Indices of parameters of RE components in global parameter vector cov_pars. ind_par_[i] + 1 and ind_par_[i+1] are the indices of the first and last parameter of component number i */
std::vector<data_size_t> ind_par_;
/*! \brief Number of covariance parameters */
data_size_t num_cov_par_;
/*! \brief Total number of random effect components (grouped REs plus other GPs) */
data_size_t num_comps_total_ = 0;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Symbolic Cholesky decomposition of Psi matrices */
std::map<gp_id_t, T2> chol_facts_solve_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky factors of Psi matrices */ //TODO: above needed or can pattern be saved somewhere else?
std::map<gp_id_t, T1> chol_facts_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: **** */ //TODO: remove?
std::map<gp_id_t, T1> Id_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices used for calculation of inverse covariance matrix **** */
std::map<gp_id_t, cs> Id_cs_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: data y */
std::map<gp_id_t, vec_t> y_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */
std::map<gp_id_t, vec_t> y_aux_;
/*! \brief Indicates whether y_aux_ has been calculated */
bool y_aux_has_been_calculated_ = false;
/*! \brief Collects inverse covariance matrices Psi^{-1} (usually not saved, but used e.g. in Fisher scoring without the Vecchia approximation) */
std::map<gp_id_t, T1> psi_inv_;
/*! \brief Copy of response data (used only in case there are also linear covariates since then y_ is modified during the algorithm) */
vec_t y_vec_;
/*! \brief If true, a symbolic decomposition is first done when calculating the Cholesky factor of the covariance matrix (only for sparse matrices) */
bool do_symbolic_decomposition_ = true;
/*! \brief If true, the Woodbury, Sherman and Morrison matrix inversion formula is used for calculating the inverse of the covariance matrix (only used if there are only grouped REs and no Gaussian processes) */
bool use_woodbury_identity_ = false;
/*! \brief Collects matrices Z^T (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the indepedent RE components) */
std::map<gp_id_t, sp_mat_t> Zt_;
/*! \brief Collects matrices Z^TZ (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the indepedent RE components) */
std::map<gp_id_t, sp_mat_t> ZtZ_;
/*! \brief Cumulative number of random effects for components (usually not saved, only saved when use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the indepedent RE components) */
std::map<gp_id_t, std::vector<data_size_t>> cum_num_rand_eff_;//The random effects of component j start at cum_num_rand_eff_[0][j]+1 and end at cum_num_rand_eff_[0][j+1]
/*! \brief If true, the model linearly includes covariates */
bool has_covariates_ = false;
/*! \brief Number of covariates */
int num_coef_;
/*! \brief Covariate data */
den_mat_t X_;
/*! \brief List of supported optimizers for covariance parameters */
const std::set<string_t> SUPPORTED_OPTIM_COV_PAR_{ "gradient_descent", "fisher_scoring" };
/*! \brief List of supported optimizers for regression coefficients */
const std::set<string_t> SUPPORTED_OPTIM_COEF_{ "gradient_descent", "wls" };
/*! \brief If true, the Vecchia approximation is used for the Gaussian process */
bool vecchia_approx_ = false;
/*! \brief The number of neighbors used in the Vecchia approximation */
int num_neighbors_;
/*! \brief Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering */
string_t vecchia_ordering_ = "none";
/*! \brief The number of neighbors used in the Vecchia approximation for making predictions */
int num_neighbors_pred_;
/*! \brief Ordering used in the Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions */
string_t vecchia_pred_type_ = "order_obs_first_cond_obs_only";//This is saved here and not simply set in the prediction function since it needs to be used repeatedly in the GPBoost algorithm when making predictions in "regression_metric.hpp" and the way predictions are done for the Vecchia approximation should be decoupled from the boosting algorithm
/*! \brief List of supported covariance functions */
const std::set<string_t> SUPPORTED_VECCHIA_PRED_TYPES_{ "order_obs_first_cond_obs_only",
"order_obs_first_cond_all", "order_pred_first",
"latent_order_obs_first_cond_obs_only", "latent_order_obs_first_cond_all" };
/*! \brief Collects indices of nearest neighbors (used for Vecchia approximation) */
std::map<gp_id_t, std::vector<std::vector<int>>> nearest_neighbors_;
/*! \brief Distances between locations and their nearest neighbors (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_obs_neighbors_;
/*! \brief Distances between nearest neighbors for all locations (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_between_neighbors_;//TODO: this contains duplicate information (i.e. distances might be saved redundantly several times). But there is a trade-off between storage and computational speed. I currently don't see a way for saving unique distances without copying them when using them.
/*! \brief Outer product of covariate vector at observations and neighbors with itself. First index = cluster, second index = data point i, third index = GP number j (this is used only if the Vecchia approximation is used, this is handled saved directly in the GP component using Z_) */
std::map<gp_id_t, std::vector<std::vector<den_mat_t>>> z_outer_z_obs_neighbors_;
/*! \brief Collects matrices B = I - A (=Cholesky factor of inverse covariance) for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> B_;
/*! \brief Collects diagonal matrices D^-1 for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> D_inv_;
/*! \brief Collects derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> B_grad_;
/*! \brief Collects derivatives of matrices D for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> D_grad_;
/*! \brief Triplets for initializing the matrices B */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_;
/*! \brief Triplets for initializing the matrices B_grad */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_grad_;
/*! \brief Variance of idiosyncratic error term (nugget effect) */
double sigma2_;
/*! \brief Cluster IDs for prediction */
std::vector<gp_id_t> cluster_ids_data_pred_;
/*! \brief Levels of grouped RE for prediction */
std::vector<std::vector<string_t>> re_group_levels_pred_;
/*! \brief Covariate data for grouped random RE for prediction */
std::vector<double> re_group_rand_coef_data_pred_;
/*! \brief Coordinates for GP for prediction */
std::vector<double> gp_coords_data_pred_;
/*! \brief Covariate data for random GP for prediction */
std::vector<double> gp_rand_coef_data_pred_;
/*! \brief Covariate data for linear regression term */
std::vector<double> covariate_data_pred_;
/*! \brief Nesterov schedule */
double NesterovSchedule(int iter, int momentum_schedule_version = 0,
	double nesterov_acc_rate = 0.5, int momentum_offset = 2) {
	//No acceleration during the first 'momentum_offset' iterations
	if (iter < momentum_offset) {
		return 0.;
	}
	switch (momentum_schedule_version) {
	case 0:
		//Constant momentum rate
		return nesterov_acc_rate;
	case 1:
		//Increasing schedule that approaches 1 as iter grows
		return 1. - (3. / (6. + iter));
	default:
		//Unknown schedule version: no acceleration
		return 0.;
	}
}
/*! \brief mutex for threading safe call */
std::mutex mutex_;
/*! \brief Constructs identity matrices if sparse matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
	//Dimension: number of random effects if the Woodbury identity is used, otherwise the number of data points
	int dim_I = use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
	T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
	I.setIdentity();
	Id_.insert({ cluster_i, I });
	cs Id_cs = cs();//same for cs type //TODO: construct this independently of Id_ , but then care need to be taken for deleting the pointer objects.
	Id_cs.nzmax = dim_I;//an identity matrix has exactly dim_I non-zero entries
	Id_cs.m = dim_I;
	Id_cs.n = dim_I;
	//NOTE: the CSparse struct below aliases the memory of Id_[cluster_i] (no copy is made);
	//the Eigen matrix must stay alive and must not be reallocated while Id_cs_ is in use
	Id_[cluster_i].makeCompressed();
	Id_cs.p = reinterpret_cast<csi*>(Id_[cluster_i].outerIndexPtr());
	Id_cs.i = reinterpret_cast<csi*>(Id_[cluster_i].innerIndexPtr());
	Id_cs.x = Id_[cluster_i].valuePtr();
	Id_cs.nz = -1;//nz = -1 marks compressed-column (CSC) format in CSparse
	Id_cs_.insert({ cluster_i, Id_cs });
}
/*! \brief Constructs identity matrices if dense matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
	//Dimension: number of random effects if the Woodbury identity is used, otherwise the number of data points
	int dim_I = use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
	//Dense identity matrix used when calculating the precision matrix
	T3 identity(dim_I, dim_I);
	identity.setIdentity();
	Id_.insert({ cluster_i, identity });
}
/*!
* \brief Set response variable data (y_)
* \param y_data Response variable data
*/
void SetY(const double* y_data) {
if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
y_[unique_clusters_[0]] = Eigen::Map<const vec_t>(y_data, num_data_);
//y_[unique_clusters_[0]] = vec_t(num_data_);
//y_[unique_clusters_[0]].setZero();
}
else {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
}
}
}
}
/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). Array needs to be pre-allocated of length num_data_
*/
void GetYAux(double* y_aux) {
CHECK(y_aux_has_been_calculated_);
if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
for (int j = 0; j < num_data_; ++j) {
y_aux[j] = y_aux_[unique_clusters_[0]][j];
}
}
else {
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_aux[data_indices_per_cluster_[cluster_i][j]] = y_aux_[cluster_i][j];
}
}
}
}
/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). This vector needs to be pre-allocated of length num_data_
*/
void GetYAux(vec_t& y_aux) {
	CHECK(y_aux_has_been_calculated_);
	//Single cluster without reordering: internal order equals data order, copy the whole vector
	if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
		y_aux = y_aux_[unique_clusters_[0]];
		return;
	}
	//Otherwise scatter the per-cluster results to the positions of the original data
	for (const auto& cluster_i : unique_clusters_) {
		y_aux(data_indices_per_cluster_[cluster_i]) = y_aux_[cluster_i];
	}
}
/*! \brief Do Cholesky decomposition if sparse matrices are used */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
	//Redo the symbolic decomposition only when requested (the sparsity pattern of psi is otherwise assumed unchanged)
	if (analyze_pattern) {
		chol_facts_solve_[cluster_i].analyzePattern(psi);
	}
	//Numerical factorization psi = L * L^T
	chol_facts_solve_[cluster_i].factorize(psi);
	//Also store the lower-triangular factor L itself in compressed format (used e.g. for manual triangular solves)
	chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
	chol_facts_[cluster_i].makeCompressed();
}
/*! \brief Do Cholesky decomposition if dense matrices are used */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
	//There is no symbolic analysis phase for dense Cholesky factorizations
	if (analyze_pattern) {
		Log::Warning("Pattern of Cholesky factor is not analyzed when dense matrices are used.");
	}
	//Dense Cholesky factorization psi = L * L^T
	chol_facts_solve_[cluster_i].compute(psi);
	//Store the lower-triangular factor L (used e.g. for manual triangular solves)
	chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
}
/*! \brief Calculate Psi^(-1) if sparse matrices are used */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
	if (use_woodbury_identity_) {
		//Woodbury identity: Psi^-1 = I - Z * (Sigma^-1 + Z^T Z)^-1 * Z^T
		sp_mat_t MInvSqrtZt;
		sp_mat_t L_inv;
		//Invert the Cholesky factor L by solving L * L_inv = I
		eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Id_[cluster_i], L_inv, true);
		MInvSqrtZt = L_inv * Zt_[cluster_i];
		////Alternative option (crashes when eigen_sp_Lower_sp_RHS_cs_solve uses sp_Lower_sp_RHS_cs_solve / cs_spsolve due to Eigen bug)
		//eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Zt_[cluster_i], MInvSqrtZt, true);
		psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;
		psi_inv.diagonal().array() += 1.0;
	}
	else {
		//Using CSparse function 'cs_spsolve'
		//The CSparse struct below aliases the memory of the Eigen Cholesky factor (no copy is made)
		cs L_cs = cs();//Prepare LHS
		L_cs.nzmax = (int)chol_facts_[cluster_i].nonZeros();
		L_cs.m = num_data_per_cluster_[cluster_i];
		L_cs.n = num_data_per_cluster_[cluster_i];
		L_cs.p = reinterpret_cast<csi*>(chol_facts_[cluster_i].outerIndexPtr());
		L_cs.i = reinterpret_cast<csi*>(chol_facts_[cluster_i].innerIndexPtr());
		L_cs.x = chol_facts_[cluster_i].valuePtr();
		L_cs.nz = -1;//nz = -1 marks compressed-column (CSC) format in CSparse
		//Invert Cholesky factor
		sp_mat_t L_inv;
		sp_Lower_sp_RHS_cs_solve(&L_cs, &Id_cs_[cluster_i], L_inv, true);
		//Psi^-1 = L^-T * L^-1
		psi_inv = L_inv.transpose() * L_inv;
		////Version 2: doing sparse solving "by hand" but ignoring sparse RHS
		//const double* val = chol_facts_[cluster_i].valuePtr();
		//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
		//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
		//den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]);
		//for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
		//	sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]);
		//}
		//const sp_mat_t L_inv = L_inv_dens.sparseView();
		//psi_inv = L_inv.transpose() * L_inv;
		////Version 1: let Eigen do the solving
		//cpsi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
	}
}
/*! \brief Calculate Psi^(-1) if dense matrices are used */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
	if (use_woodbury_identity_) {//should currently not be called as use_woodbury_identity_ is only true for grouped REs only i.e. sparse matrices
		//Woodbury identity: Psi^-1 = I - Z * (Sigma^-1 + Z^T Z)^-1 * Z^T
		T3 MInvSqrtZt = Zt_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
		for (int j = 0; j < (int)MInvSqrtZt.cols(); ++j) {
			//Bug fix: column j of a column-major matrix starts at offset j * rows() (previously cols() was used as stride;
			//MInvSqrtZt is rectangular, #random effects x #data, so rows() != cols() in general)
			L_solve(chol_facts_[cluster_i].data(), (int)chol_facts_[cluster_i].cols(), MInvSqrtZt.data() + j * (int)MInvSqrtZt.rows());
		}
		psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;
		psi_inv.diagonal().array() += 1.0;
	}
	else {
		//Solve L * L_inv = I column by column by forward substitution, then Psi^-1 = L^-T * L^-1
		T3 L_inv = Id_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
		for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
			L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], L_inv.data() + j * num_data_per_cluster_[cluster_i]);
		}
		//Note: chol_facts_[cluster_i].triangularView<Eigen::Lower>().solveInPlace(L_inv) is slower
		psi_inv = L_inv.transpose() * L_inv;
	}
}
/*! \brief Calculate Psi^(-0.5)H if sparse matrices are used. Used in 'NewtonUpdateLeafValues' */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(T3& PsiInvSqrtH, sp_mat_t& H, gp_id_t cluster_i) {
	//Solve L * PsiInvSqrtH = H where L is the sparse Cholesky factor of Psi for cluster_i
	eigen_sp_Lower_sp_RHS_solve(chol_facts_[cluster_i], H, PsiInvSqrtH, true);
	//Note: Using CSparse function 'cs_spsolve' via 'eigen_sp_Lower_sp_RHS_cs_solve' crashes due to bug in Eigen
}
/*! \brief Calculate Psi^(-0.5)H if dense matrices are used. Used in 'NewtonUpdateLeafValues' */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(T3& PsiInvSqrtH, sp_mat_t& H, gp_id_t cluster_i) {
	//Densify H, then solve L * PsiInvSqrtH = H column by column (L = dense Cholesky factor of Psi)
	PsiInvSqrtH = den_mat_t(H);
#pragma omp parallel for schedule(static)
	for (int j = 0; j < H.cols(); ++j) {
		//Column j of the column-major result starts at offset j * number of rows (= num_data_per_cluster_[cluster_i] = H.rows())
		L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
	}
}
///*!
//* \brief Caclulate X^TPsi^(-1)X
//* \param X Covariate data matrix X
//* \param[out] XT_psi_inv_X X^TPsi^(-1)X
//*/
// template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// L_solve(chol_facts_[cluster0].data(), num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
// //same for sparse matrices
// template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// sp_L_solve(chol_facts_[cluster0].valuePtr(), chol_facts_[cluster0].innerIndexPtr(), chol_facts_[cluster0].outerIndexPtr(),
// num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// sp_L_solve(chol_facts_[cluster_i].valuePtr(), chol_facts_[cluster_i].innerIndexPtr(), chol_facts_[cluster_i].outerIndexPtr(),
// num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
/*!
* \brief Calculate X^TPsi^(-1)X
* \param X Covariate data matrix X
* \param[out] XT_psi_inv_X X^TPsi^(-1)X
*/
void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
if (num_clusters_ == 1 && vecchia_ordering_ == "none") {//only one cluster / idependent GP realization
if (vecchia_approx_) {
den_mat_t BX = B_[unique_clusters_[0]] * X;
XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
}
else {
if (use_woodbury_identity_) {
den_mat_t ZtX = Zt_[unique_clusters_[0]] * X;
XT_psi_inv_X = X.transpose() * X - ZtX.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(ZtX);
}
else {
XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
}
}
}
else {//more than one cluster / idependent GP realization
XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
XT_psi_inv_X.setZero();
den_mat_t BX;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
}
else {
if (use_woodbury_identity_) {
den_mat_t ZtX = Zt_[cluster_i] * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all);
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) -
ZtX.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(ZtX);
}
else {
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
}
}
}
}
}
/*!
* \brief Initialize data structures for handling independent realizations of the Gaussian processes. Answers written on arguments.
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization)
* \param[out] num_data_per_cluster Keys: labels of independent clusters, values: number of data points per independent realization
* \param[out] data_indices_per_cluster Keys: labels of independent clusters, values: vectors with indices for data points that belong to each cluster
* \param[out] unique_clusters Unique labels of independent realizations
* \param[out] num_clusters Number of independent clusters
*/
void SetUpGPIds(data_size_t num_data, const gp_id_t* cluster_ids_data,
std::map<gp_id_t, int>& num_data_per_cluster, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
std::vector<gp_id_t>& unique_clusters, data_size_t& num_clusters) {
if (cluster_ids_data == nullptr) {
//No cluster labels provided: put all data points into a single cluster with label 0
unique_clusters.push_back(0);
num_data_per_cluster.insert({ 0, num_data });
num_clusters = 1;
std::vector<int> all_indices(num_data);
for (int i = 0; i < num_data; ++i) {
all_indices[i] = i;
}
data_indices_per_cluster.insert({ 0, all_indices });
return;
}
//Group data point indices by their cluster label, preserving order of first appearance
for (int i = 0; i < num_data; ++i) {
gp_id_t label = cluster_ids_data[i];
auto it = num_data_per_cluster.find(label);
if (it == num_data_per_cluster.end()) {//first occurrence of this label
unique_clusters.push_back(label);
num_data_per_cluster.insert({ label, 1 });
data_indices_per_cluster.insert({ label, std::vector<int>{ i } });
}
else {
it->second += 1;
data_indices_per_cluster[label].push_back(i);
}
}
num_clusters = (data_size_t)unique_clusters.size();
}
/*!
* \brief Convert characters in 'const char* re_group_data' to matrix (num_re_group x num_data) with strings of group labels
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param[out] re_group_levels Matrix of dimension num_re_group x num_data with strings of group labels for levels of grouped random effects
*/
void ConvertCharToStringGroupLevels(data_size_t num_data, data_size_t num_re_group,
const char* re_group_data, std::vector<std::vector<string_t>>& re_group_levels) {
int char_start = 0;
for (int ire = 0; ire < num_re_group; ++ire) {//TODO: catch / report potential error if format of re_group_data is not correct
for (int id = 0; id < num_data; ++id) {
//Find the length of the current null-terminated label
int number_chars = 0;
while (re_group_data[char_start + number_chars] != '\0') {
number_chars++;
}
//Construct with the already-known length to avoid a redundant second scan for '\0'
re_group_levels[ire][id] = std::string(re_group_data + char_start, number_chars);
char_start += number_chars + 1;//skip past the label and its terminating '\0'
}
}
}
/*!
* \brief Initialize individual component models and collect them in a container
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param re_group_levels Group levels for every grouped random effect
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting start at 1.
* \param num_gp Number of Gaussian processes (intercept only, random coefficients not counting)
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param ind_intercept_gp Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs
* \param[out] re_comps_cluster_i Container that collects the individual component models
*/
void CreateREComponents(data_size_t num_data, data_size_t num_re_group, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, gp_id_t cluster_i,
std::vector<std::vector<string_t>>& re_group_levels, std::map<gp_id_t, int>& num_data_per_cluster, data_size_t num_re_group_rand_coef,
const double* re_group_rand_coef_data, std::vector<int>& ind_effect_group_rand_coef, data_size_t num_gp, const double* gp_coords_data, int dim_gp_coords,
const double* gp_rand_coef_data, data_size_t num_gp_rand_coef, const string_t cov_fct, double cov_fct_shape, int ind_intercept_gp,
std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i) {
//Grouped REs
if (num_re_group > 0) {
for (int j = 0; j < num_re_group; ++j) {
//Collect the group labels of effect j for the data points belonging to this cluster
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
group_data.push_back(re_group_levels[j][id]);//group_data_.push_back(std::string(re_group_data[j * num_data_ + id]));
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(group_data)));
}
//Random slopes
if (num_re_group_rand_coef > 0) {
for (int j = 0; j < num_re_group_rand_coef; ++j) {
//Covariate values of random coefficient j for the data points of this cluster (re_group_rand_coef_data is column-major)
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data[j * num_data + id]);
}
std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_cluster_i[ind_effect_group_rand_coef[j] - 1]);//Subtract -1 since ind_effect_group_rand_coef[j] starts counting at 1 not 0
//The random-slope component reuses the grouping structure (group data, label->index map, group count) of its "base" intercept component
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(re_comp->group_data_, re_comp->map_group_label_index_, re_comp->num_group_, rand_coef_data)));
}
}
}
//GPs
if (num_gp > 0) {
//Stack coordinates dimension by dimension -> column-major order, matching Eigen's default layout expected by the Map below
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, true)));//NOTE(review): last argument 'true' presumably requests saving distances -- confirm against RECompGP constructor
//Random slopes
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
//Random-coefficient GPs share the distance matrix dist_ and incidence matrix Z_ of the intercept GP
std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_cluster_i[ind_intercept_gp]);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(re_comp->dist_, re_comp->has_Z_,
&re_comp->Z_, rand_coef_data, cov_fct, cov_fct_shape)));
}
}
}
}
/*!
* \brief Initialize individual component models and collect them in a container when the Vecchia approximation is used
* \param num_data Number of data points
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param[out] re_comps_cluster_i Container that collects the individual component models
* \param[out] nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param[out] dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param[out] dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param[out] entries_init_B_cluster_i Triplets for initializing the matrices B
* \param[out] entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param[out] z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param num_neighbors The number of neighbors used in the Vecchia approximation
*/
void CreateREComponentsVecchia(data_size_t num_data, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, gp_id_t cluster_i, std::map<gp_id_t, int>& num_data_per_cluster,
const double* gp_coords_data, int dim_gp_coords, const double* gp_rand_coef_data, data_size_t num_gp_rand_coef, const string_t cov_fct, double cov_fct_shape,
std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i, std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i, std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i, std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i, string_t vecchia_ordering = "none", int num_neighbors = 30) {
if (vecchia_ordering == "random") {
unsigned seed = 0;//fixed seed for reproducibility of the random ordering
std::shuffle(data_indices_per_cluster[cluster_i].begin(), data_indices_per_cluster[cluster_i].end(), std::default_random_engine(seed));
}
//Stack coordinates dimension by dimension -> column-major order, matching Eigen's default layout expected by the Map below
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, false)));
find_nearest_neighbors_Veccia_fast(gp_coords_mat, num_data_per_cluster[cluster_i], num_neighbors,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
//Collect triplets that define the sparsity pattern of B and B_grad
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
for (int j = 0; j < (int)nearest_neighbors_cluster_i[i].size(); ++j) {
entries_init_B_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
entries_init_B_grad_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
}
entries_init_B_cluster_i.push_back(Triplet_t(i, i, 1.));//Put 1's on the diagonal since B = I - A
}
//Random coefficients
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(rand_coef_data, cov_fct, cov_fct_shape)));
//save random coefficient data in the form of outer product matrices
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
if (j == 0) {
//Bug fix: allocate the per-data-point vector only once (for the first coefficient);
//re-assigning it for every j would wipe the matrices already stored for earlier coefficients
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef);
}
//coef_vec = (z_i, z_{neighbors of i}); the first point (i == 0) has no neighbors
int dim_z = (i == 0) ? 1 : ((int)nearest_neighbors_cluster_i[i].size() + 1);
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
}
/*!
* \brief Set the covariance parameters of the components
* \param cov_pars Covariance parameters
*/
void SetCovParsComps(const vec_t& cov_pars) {
CHECK(cov_pars.size() == num_cov_par_);
sigma2_ = cov_pars[0];
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_comps_total_; ++j) {
//const std::vector<double> pars = std::vector<double>(cov_pars.begin() + ind_par_[j] + 1, cov_pars.begin() + ind_par_[j + 1] + 1);
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
re_comps_[cluster_i][j]->SetCovPars(pars);
}
}
}
/*!
* \brief Transform the covariance parameters to the scale on which the MLE is found
* \param cov_pars Covariance parameters
* \param[out] pars_trans Transformed covariance parameters
*/
void TransformCovPars(const vec_t& cov_pars, vec_t& cov_pars_trans) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_trans = vec_t(num_cov_par_);
cov_pars_trans[0] = cov_pars[0];
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
vec_t pars_trans = pars;
re_comps_[unique_clusters_[0]][j]->TransformCovPars(cov_pars[0], pars, pars_trans);
cov_pars_trans.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]) = pars_trans;
}
}
/*!
* \brief Back-transform the covariance parameters to the original scale
* \param cov_pars Covariance parameters
* \param[out] cov_pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const vec_t& cov_pars, vec_t& cov_pars_orig) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_orig = vec_t(num_cov_par_);
cov_pars_orig[0] = cov_pars[0];//the nugget variance itself is not transformed
for (int j = 0; j < num_comps_total_; ++j) {
//Parameters of component j occupy the segment [ind_par_[j] + 1, ind_par_[j + 1]]
const int par_start = ind_par_[j] + 1;
const int num_par_j = ind_par_[j + 1] - ind_par_[j];
const vec_t pars = cov_pars.segment(par_start, num_par_j);
vec_t pars_orig = pars;
re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(cov_pars[0], pars, pars_orig);
cov_pars_orig.segment(par_start, num_par_j) = pars_orig;
}
}
/*!
* \brief Calculate covariance matrices of the components
*/
void CalcSigmaComps() {
//Recompute the covariance matrix of every component for every independent realization
for (const auto& cluster_i : unique_clusters_) {
auto& comps = re_comps_[cluster_i];
for (int j = 0; j < num_comps_total_; ++j) {
comps[j]->CalcSigma();
}
}
}
/*!
* \brief Calculate matrices A and D_inv as well as their derivatives for the Vecchia approximation for one cluster (independent realization of GP)
* \param num_data_cluster_i Number of data points
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param re_comps_cluster_i Container that collects the individual component models
* \param nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param entries_init_B_cluster_i Triplets for initializing the matrices B
* \param entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param[out] B_cluster_i Matrix A = I - B (= Cholesky factor of inverse covariance) for Vecchia approximation
* \param[out] D_inv_cluster_i Diagonal matrices D^-1 for Vecchia approximation
* \param[out] B_grad_cluster_i Derivatives of matrices A ( = derivative of matrix -B) for Vecchia approximation
* \param[out] D_grad_cluster_i Derivatives of matrices D for Vecchia approximation
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance
*/
void CalcCovFactorVecchia(int num_data_cluster_i, bool calc_gradient,//TODO: make arguments const
std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i, std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i, std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i, std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
sp_mat_t& B_cluster_i, sp_mat_t& D_inv_cluster_i, std::vector<sp_mat_t>& B_grad_cluster_i, std::vector<sp_mat_t>& D_grad_cluster_i,
bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
int num_par_comp = re_comps_cluster_i[ind_intercept_gp_]->num_cov_par_;
int num_par_gp = num_par_comp * num_gp_total_ + calc_gradient_nugget;//one additional parameter slot at the end for the nugget variance if calc_gradient_nugget
//Initialize matrices B = I - A and D^-1 as well as their derivatives (in order that the code below can be run in parallel)
B_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//B = I - A
B_cluster_i.setFromTriplets(entries_init_B_cluster_i.begin(), entries_init_B_cluster_i.end());//Note: 1's are put on the diagonal
D_inv_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//D^-1. Note: we first calculate D, and then take the inverse below
D_inv_cluster_i.setIdentity();//Put 1's on the diagonal for nugget effect (entries are not overridden but added below)
if (!transf_scale) {
D_inv_cluster_i.diagonal().array() *= nugget_var;//nugget effect is not 1 if not on transformed scale
}
if (calc_gradient) {
B_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of B = derivative of (-A)
D_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of D
for (int ipar = 0; ipar < num_par_gp; ++ipar) {
B_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
B_grad_cluster_i[ipar].setFromTriplets(entries_init_B_grad_cluster_i.begin(), entries_init_B_grad_cluster_i.end());
D_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
D_grad_cluster_i[ipar].setIdentity();//Put 0 on the diagonal
D_grad_cluster_i[ipar].diagonal().array() = 0.;//TODO: maybe change initialization of this matrix by also using triplets -> faster?
}
}//end initialization
//Every row i of B and D^-1 (and the gradients) depends only on point i and its neighbors -> rows can be computed in parallel
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_cluster_i; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//calculate covariance matrices between observations and neighbors and among neighbors as well as their derivatives
den_mat_t cov_mat_obs_neighbors(1, num_nn);
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);
std::vector<den_mat_t> cov_grad_mats_obs_neighbors(num_par_gp);//covariance matrix plus derivative wrt to every parameter
std::vector<den_mat_t> cov_grad_mats_between_neighbors(num_par_gp);
if (i > 0) {//the first point (i == 0) has no neighbors
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;//index of first parameter (variance) of component j in gradient vectors
if (j == 0) {
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],//re_comp->
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);//write on matrices directly for first GP component
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
//multiply by coefficient matrix
//(block(0,1,1,num_nn) = products z_i * z_neighbors, block(1,1,num_nn,num_nn) = products among neighbors)
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();//cov_mat_obs_neighbors_j.cwiseProduct()
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
if (calc_gradient) {
cov_grad_mats_obs_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_obs_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
}
}
}//end loop over components j
}//end if(i>0)
//Calculate matrices B and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii}) and its derivatives
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_cluster_i[ind_intercept_gp_ + j]->cov_pars_[0];
if (!transf_scale) {
d_comp_j *= nugget_var;
}
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
D_inv_cluster_i.coeffRef(i, i) += d_comp_j;
if (calc_gradient) {
if (transf_scale) {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = d_comp_j;//derivative of the covariance function wrt the variance. derivative of the covariance function wrt to range is zero on the diagonal
}
else {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = 1.;//1's on the diagonal on the original scale
}
}
}
if (calc_gradient && calc_gradient_nugget) {
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) = 1.;//derivative of D wrt the nugget variance (last parameter slot)
}
//2. remaining terms
if (i > 0) {
if (transf_scale) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
}
else {
cov_mat_between_neighbors.diagonal().array() += nugget_var;
}
//A_i = Cov(obs i, neighbors) * Cov(neighbors, neighbors)^-1 (the Vecchia regression coefficients for row i)
den_mat_t A_i(1, num_nn);
den_mat_t cov_mat_between_neighbors_inv;
den_mat_t A_i_grad_sigma2;
if (calc_gradient) {
den_mat_t I(num_nn, num_nn);
I.setIdentity();
cov_mat_between_neighbors_inv = cov_mat_between_neighbors.llt().solve(I);//the explicit inverse is needed for the gradient terms below
A_i = cov_mat_obs_neighbors * cov_mat_between_neighbors_inv;
if (calc_gradient_nugget) {
A_i_grad_sigma2 = -A_i * cov_mat_between_neighbors_inv;
}
}
else {
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
}
for (int inn = 0; inn < num_nn; ++inn) {
B_cluster_i.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i(0, inn);
}
D_inv_cluster_i.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
if (calc_gradient) {
den_mat_t A_i_grad(1, num_nn);
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
A_i_grad = (cov_grad_mats_obs_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv) -
(cov_mat_obs_neighbors * cov_mat_between_neighbors_inv *
cov_grad_mats_between_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv);
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[ind_first_par + ipar].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad(0, inn);
}
if (ipar == 0) {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) -= ((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//add to derivative of diagonal elements for marginal variance
}
else {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) = -((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//don't add to existing values since derivative of diagonal is zero for range
}
}
}
if (calc_gradient_nugget) {
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[num_par_gp - 1].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad_sigma2(0, inn);
}
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) -= (A_i_grad_sigma2 * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end calc_gradient
}//end if i > 0
D_inv_cluster_i.coeffRef(i, i) = 1. / D_inv_cluster_i.coeffRef(i, i);//D was accumulated above; store its inverse (diagonal -> reciprocal)
}//end loop over data i
}
/*!
* \brief Create the covariance matrix Psi and factorize it (either calculate a Cholesky factor or the inverse covariance matrix)
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true (only for Vecchia approximation)
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back, normally this is equal to one, since the variance paramter is modelled separately and factored out)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance (only for Vecchia approximation)
*/
void CalcCovFactor(bool calc_gradient = false, bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
if (vecchia_approx_) {
for (const auto& cluster_i : unique_clusters_) {
int num_data_cl_i = num_data_per_cluster_[cluster_i];
CalcCovFactorVecchia(num_data_cl_i, calc_gradient, re_comps_[cluster_i], nearest_neighbors_[cluster_i],
dist_obs_neighbors_[cluster_i], dist_between_neighbors_[cluster_i],
entries_init_B_[cluster_i], entries_init_B_grad_[cluster_i], z_outer_z_obs_neighbors_[cluster_i],
B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i], transf_scale, nugget_var, calc_gradient_nugget);
}
}
else {
CalcSigmaComps();
for (const auto& cluster_i : unique_clusters_) {
if (use_woodbury_identity_) {//Use Woodburry matrix inversion formula: used only if there are only grouped REs
//Construct matrix Sigma^-1
std::vector<Triplet_t> triplets;
triplets.reserve(cum_num_rand_eff_[cluster_i][num_comps_total_]);
for (int j = 0; j < num_comps_total_; ++j) {
double sigmaI = re_comps_[cluster_i][j]->cov_pars_[0];
sigmaI = 1.0 / sigmaI;
for (int i = cum_num_rand_eff_[cluster_i][j]; i < cum_num_rand_eff_[cluster_i][j + 1]; ++i) {
triplets.emplace_back(i, i, sigmaI);
}
}
sp_mat_t SigmaI(cum_num_rand_eff_[cluster_i][num_comps_total_], cum_num_rand_eff_[cluster_i][num_comps_total_]);
SigmaI.setFromTriplets(triplets.begin(), triplets.end());
T1 SigmaIplusZtZ = SigmaI + ZtZ_[cluster_i];
CalcChol<T1>(SigmaIplusZtZ, cluster_i, do_symbolic_decomposition_);
//for (int i = 0; i < (int)SigmaIplusZtZ.rows(); ++i) {//For debugging only
// for (int j = 0; j < (int)SigmaIplusZtZ.cols(); ++j) {
// Log::Info("SigmaIplusZtZ(%d,%d) %f", i, j, SigmaIplusZtZ.coeffRef(i, j));
// }
//}
//Log::Info("");
//for (int i = 0; i < (int)chol_facts_[cluster_i].rows(); ++i) {//For debugging only
// for (int j = 0; j < (int)chol_facts_[cluster_i].cols(); ++j) {
// Log::Info("chol_facts_[cluster_i](%d,%d) %f", i, j, chol_facts_[cluster_i].coeffRef(i, j));
// }
//}
}//end use_woodbury_identity_
else {
T1 psi;
psi.resize(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
psi.setIdentity();
for (int j = 0; j < num_comps_total_; ++j) {
psi += (*(re_comps_[cluster_i][j]->GetZSigmaZt()));
}
CalcChol<T1>(psi, cluster_i, do_symbolic_decomposition_);
}
}
do_symbolic_decomposition_ = false;//Symbolic decompostion done only once (if sparse matrices are used)
}
}
/*!
* \brief Calculate Psi^-1*y (and save in y_aux_)
* \param marg_variance The marginal variance. Default = 1.
*/
void CalcYAux(double marg_variance = 1.) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::Fatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (B_.find(cluster_i) == B_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
y_aux_[cluster_i] = B_[cluster_i].transpose() * D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
if (use_woodbury_identity_) {
vec_t Zty = Zt_[cluster_i] * y_[cluster_i];
vec_t MInvZty = chol_facts_solve_[cluster_i].solve(Zty);
y_aux_[cluster_i] = y_[cluster_i] - Zt_[cluster_i].transpose() * MInvZty;
}
else {
//Version 1: let Eigen do the computation
y_aux_[cluster_i] = chol_facts_solve_[cluster_i].solve(y_[cluster_i]);
//// Version 2 'do-it-yourself' (for sparse matrices)
//y_aux_[cluster_i] = y_[cluster_i];
//const double* val = chol_facts_[cluster_i].valuePtr();
//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
//sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
//sp_L_t_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
}
}//end non-Vecchia
if (marg_variance != 1.) {
y_aux_[cluster_i] /= marg_variance;
}
}
y_aux_has_been_calculated_ = true;
}
/*!
* \brief Calculate y^T*Psi^-1*y if sparse matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy) {
yTPsiInvy = 0;
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::Fatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (B_.find(cluster_i) == B_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
//y^T*Psi^-1*y = (B*y)^T * D^-1 * (B*y) since Psi^-1 = B^T * D^-1 * B
vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
//Only one triangular (forward) solve with the Cholesky factor L is needed:
//for A = L*L^T, x^T*A^-1*x = ||L^-1*x||^2
vec_t y_aux_sqrt;
const double* val = chol_facts_[cluster_i].valuePtr();
const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
if (use_woodbury_identity_) {
//Woodbury: y^T*Psi^-1*y = y^T*y - ||L^-1*(Z^T*y)||^2 where L is the factor of the
//matrix in chol_facts_ (presumably Sigma^-1 + Z^T*Z -- see CalcCovFactor)
y_aux_sqrt = Zt_[cluster_i] * y_[cluster_i];
sp_L_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], y_aux_sqrt.data());
yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
else {
y_aux_sqrt = y_[cluster_i];
sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
}//end not vecchia_approx_
}
}
/*!
* \brief Calculate y^T*Psi^-1*y if dense matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy) {
yTPsiInvy = 0;
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::Fatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (B_.find(cluster_i) == B_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
//y^T*Psi^-1*y = (B*y)^T * D^-1 * (B*y) since Psi^-1 = B^T * D^-1 * B
vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
//Only one triangular (forward) solve with the dense Cholesky factor L is needed:
//for A = L*L^T, x^T*A^-1*x = ||L^-1*x||^2
vec_t y_aux_sqrt;
if (use_woodbury_identity_) {
//Woodbury: y^T*Psi^-1*y = y^T*y - ||L^-1*(Z^T*y)||^2 (L = factor stored in chol_facts_)
y_aux_sqrt = Zt_[cluster_i] * y_[cluster_i];
L_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], y_aux_sqrt.data());
yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
else {
y_aux_sqrt = y_[cluster_i];
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
}//end not vecchia_approx_
}
}
/*!
* \brief Calculate gradient for covariance parameters
* \param include_error_var If true, the gradient for the marginal variance parameter (=error, nugget effect) is also calculated, otherwise not (set this to true if the nugget effect is not calculated by using the closed-form solution)
* \param save_psi_inv If true, the inverse covariance matrix Psi^-1 is saved for reuse later (e.g. when calculating the Fisher information in Fisher scoring). This option is ignored if the Vecchia approximation is used.
* \return Gradient for covariance parameters
*/
vec_t GetCovParGrad(bool include_error_var = false, bool save_psi_inv = false) {
vec_t cov_grad;
//If the error variance is included, it occupies the first entry of the gradient
if (include_error_var) {
cov_grad = vec_t::Zero(num_cov_par_);
}
else {
cov_grad = vec_t::Zero(num_cov_par_ - 1);
}
int first_cov_par = include_error_var ? 1 : 0;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {//Vecchia approximation
vec_t u(num_data_per_cluster_[cluster_i]);
vec_t uk(num_data_per_cluster_[cluster_i]);
if (include_error_var) {
u = B_[cluster_i] * y_[cluster_i];
//Gradient w.r.t. the (log-)error variance: -y^T*Psi^-1*y / (2*sigma2) + n/2
cov_grad[0] += -1. * ((double)(u.transpose() * D_inv_[cluster_i] * u)) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
u = D_inv_[cluster_i] * u;
}
else {
u = D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];//TODO: this is already calculated in CalcYAux -> save it there and re-use here?
}
//Gradients for the remaining parameters enter through the derivatives of B and D
for (int j = 0; j < num_comps_total_; ++j) {
int num_par_comp = re_comps_[cluster_i][j]->num_cov_par_;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
uk = B_grad_[cluster_i][num_par_comp * j + ipar] * y_[cluster_i];
cov_grad[first_cov_par + ind_par_[j] + ipar] += ((uk.dot(u) - 0.5 * u.dot(D_grad_[cluster_i][num_par_comp * j + ipar] * u)) / sigma2_ +
0.5 * (D_inv_[cluster_i].diagonal()).dot(D_grad_[cluster_i][num_par_comp * j + ipar].diagonal()));
}
}
}//end vecchia_approx_
else {//not vecchia_approx_
T1 psi_inv;
CalcPsiInv(psi_inv, cluster_i);
if (save_psi_inv) {
psi_inv_[cluster_i] = psi_inv;//save for reuse, e.g. in CalcFisherInformation
}
if (include_error_var) {
//Gradient w.r.t. the (log-)error variance: -y^T*Psi^-1*y / (2*sigma2) + n/2
cov_grad[0] += -1. * ((double)(y_[cluster_i].transpose() * y_aux_[cluster_i])) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
}
for (int j = 0; j < num_comps_total_; ++j) {
for (int ipar = 0; ipar < re_comps_[cluster_i][j]->num_cov_par_; ++ipar) {
std::shared_ptr<T1> gradPsi = re_comps_[cluster_i][j]->GetZSigmaZtGrad(ipar, true, 1.);
//-y_aux^T * dPsi/dtheta * y_aux / (2*sigma2) + tr(Psi^-1 * dPsi/dtheta) / 2
cov_grad[first_cov_par + ind_par_[j] + ipar] += -1. * ((double)(y_aux_[cluster_i].transpose() * (*gradPsi) * y_aux_[cluster_i])) / sigma2_ / 2. +
((double)(((*gradPsi).cwiseProduct(psi_inv)).sum())) / 2.;
}
}
}//end standard (non-Vecchia) calculation
}// end loop over clusters
return(cov_grad);
}
/*!
* \brief Apply a momentum step
* \param it Iteration number
* \param[out] pars Parameters
* \param[out] pars_lag1 Parameters from last iteration
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_acc_rate Acceleration rate for Nesterov acceleration
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param exclude_first_log_scale If true, no momentum is applied to the first value and the momentum step is done on the log-scale for the other values. Default = true
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
*/
void ApplyMomentumStep(int it, vec_t& pars, vec_t& pars_lag1, bool use_nesterov_acc = true,
double nesterov_acc_rate = 0.5, int nesterov_schedule_version = 0, bool exclude_first_log_scale = true,
int momentum_offset = 2) {
if (use_nesterov_acc) {
double mu = NesterovSchedule(it, nesterov_schedule_version, nesterov_acc_rate, momentum_offset);
int num_par = (int)pars.size();
vec_t pars_mom(num_par);//Covariance parameters plus a momentum step
if (exclude_first_log_scale) {
pars_mom.segment(1, num_par - 1) = ((mu + 1.) * (pars.segment(1, num_par - 1).array().log()) - mu * (pars_lag1.segment(1, num_par - 1).array().log())).exp().matrix();//Momentum is added on the log scale
pars_mom[0] = pars[0];
}
else {
pars_mom = (mu + 1) * pars - mu * pars_lag1;
}
pars_lag1 = pars;
pars = pars_mom;
}
else {
pars_lag1 = pars;
}
}
/*!
* \brief Update covariance parameters doing one gradient descent step (except for the marginal variance which is updated using an explicit solution)
* \param lr Learning rate
* \param[out] cov_pars Covariance parameters
* \param closed_form_solution_sigma If true, the error variance (nugget effect) is calculated exactly using a closed form expression
*/
void UpdateCovParGradOneIter(double lr, vec_t& cov_pars, bool closed_form_solution_sigma = true) {
if (closed_form_solution_sigma) {
//Profile out the error variance exactly: sigma2 = y^T * Psi^-1 * y / n
double yTPsiInvy = 0.;
for (const auto& cluster_i : unique_clusters_) {
yTPsiInvy += (double)(y_[cluster_i].transpose() * y_aux_[cluster_i]);
}
cov_pars[0] = yTPsiInvy / num_data_;
sigma2_ = cov_pars[0];
//Gradient descent step on the log scale for the remaining covariance parameters
vec_t grad = GetCovParGrad(false, false);
cov_pars.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr * grad.array()).exp().matrix();
}
else {
//Gradient descent step on the log scale for all parameters including the error variance
vec_t grad = GetCovParGrad(true, false);
cov_pars = (cov_pars.array().log() - lr * grad.array()).exp().matrix();
}
}
/*!
* \brief Update covariance parameters doing one step of Fisher scoring (except for the marginal variance which is updated using an explicit solution)
* \param[out] cov_pars Covariance parameters
* \param closed_form_solution_sigma If true, the error variance (nugget effect) is calculated exactly using a closed form expression
*/
void UpdateCovParFisherScoringOneIter(vec_t& cov_pars, bool closed_form_solution_sigma = false) {
vec_t grad;
den_mat_t FI;
if (closed_form_solution_sigma) {
//Profile out the error variance exactly: sigma2 = y^T * Psi^-1 * y / n
cov_pars[0] = 0.;
for (const auto& cluster_i : unique_clusters_) {
cov_pars[0] += (double)(y_[cluster_i].transpose() * y_aux_[cluster_i]);
}
cov_pars[0] /= num_data_;
sigma2_ = cov_pars[0];
//Fisher scoring step (update = FI^-1 * grad) for the remaining parameters, on the log scale
grad = GetCovParGrad(false, true);
CalcFisherInformation(cov_pars, FI, true, false, true);
vec_t update = FI.llt().solve(grad);
cov_pars.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - update.array()).exp().matrix();//make update on log-scale
}
else {
//Fisher scoring step for all parameters including the error variance, on the log scale
grad = GetCovParGrad(true, true);
CalcFisherInformation(cov_pars, FI, true, true, true);
vec_t update = FI.llt().solve(grad);
cov_pars = (cov_pars.array().log() - update.array()).exp().matrix();//make update on log-scale
}
////For debugging only
//for (int i = 0; i < (int)grad.size(); ++i) { Log::Debug("grad[%d]: %f", i, grad[i]); }
////For debugging only
//if (FI.cols() >= 3) {
//	for (int i = 0; i < FI.rows(); ++i) { Log::Debug("FI[%d,:]: %f, %f, %f", i, FI.coeffRef(i, 0), FI.coeffRef(i, 1), FI.coeffRef(i, 2)); }
//}
//else {
//	for (int i = 0; i < FI.rows(); ++i) { Log::Debug("FI[%d,:]: %f, %f", i, FI.coeffRef(i, 0), FI.coeffRef(i, 1)); }
//}
}
/*!
* \brief Update linear fixed-effect coefficients doing one gradient descent step
* \param lr Learning rate
* \param marg_var Marginal variance parameters sigma^2
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGradOneIter(double lr, double marg_var, den_mat_t& X, vec_t& beta) {
vec_t y_aux(num_data_);
GetYAux(y_aux);//y_aux = Psi^-1 * y (see CalcYAux / GetYAux)
//Gradient step: beta <- beta + lr * X^T * y_aux / marg_var
beta += lr * (1. / marg_var) * (X.transpose()) * y_aux;
}
/*!
* \brief Update linear fixed-effect coefficients using generalized least squares (GLS)
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGLS(den_mat_t& X, vec_t& beta) {
vec_t y_aux(num_data_);
GetYAux(y_aux);//y_aux = Psi^-1 * y
den_mat_t XT_psi_inv_X;
CalcXTPsiInvX(X, XT_psi_inv_X);
//GLS estimator: beta = (X^T * Psi^-1 * X)^-1 * X^T * Psi^-1 * y
beta = XT_psi_inv_X.llt().solve(X.transpose() * y_aux);
}
/*!
* \brief Check whether NaN's are present
* \param par Vector of parameters that should be checked
*/
void CheckNaNInf(vec_t& par) {
//Only the first entry is inspected; a non-finite value (NaN or +/-Inf) aborts with a hint
if (!std::isfinite(par[0])) {
Log::Fatal("NaN or Inf occurred. If this is a problem, consider doing the following. If you have used Fisher scoring, try using gradient descent. If you have used gradient descent, consider using a smaller learning rate.");
}
}
/*!
* \brief Calculate the Fisher information for covariance parameters. Note: you need to call CalcCovFactor first
* \param cov_pars Covariance parameters
* \param[out] FI Fisher information
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise on the original scale. Default = true
* \param include_error_var If true, the marginal variance parameter is also included, otherwise not
* \param use_saved_psi_inv If false, the inverse covariance matrix Psi^-1 is calculated, otherwise a saved version is used
*/
void CalcFisherInformation(const vec_t& cov_pars, den_mat_t& FI, bool transf_scale = true,
bool include_error_var = false, bool use_saved_psi_inv = false) {
//The FI is accumulated over clusters; only the upper triangle is filled in the loop,
//the strictly lower triangle is copied at the end
if (include_error_var) {
FI = den_mat_t(num_cov_par_, num_cov_par_);
}
else {
FI = den_mat_t(num_cov_par_ - 1, num_cov_par_ - 1);
}
FI.setZero();
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
//Note: if transf_scale==false, then all matrices and derivatives have been calculated on the original scale for the Vecchia approximation, that is why there is no adjustment here
//Calculate auxiliary matrices for use below
sp_mat_t Identity(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
Identity.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_cs_solve(B_[cluster_i], Identity, B_inv, true);
//D = inverse of D_inv (element-wise inversion of the diagonal)
sp_mat_t D = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D.setIdentity();
D.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(-1);
//D_inv_2 = D_inv squared (element-wise)
sp_mat_t D_inv_2 = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D_inv_2.setIdentity();
D_inv_2.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(2);
//Calculate derivative(B) * B^-1
std::vector<sp_mat_t> B_grad_B_inv(num_cov_par_ - 1);
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
B_grad_B_inv[par_nb] = B_grad_[cluster_i][par_nb] * B_inv;
}
//Calculate Fisher information
int start_cov_pars = include_error_var ? 1 : 0;
sp_mat_t D_inv_B_grad_B_inv, B_grad_B_inv_D;
if (include_error_var) {
//First calculate terms for nugget effect / noise variance parameter
if (transf_scale) {//Optimization is done on transformed scale (in particular, log-scale)
//The derivative for the nugget variance on the log scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += (double)((D_inv_[cluster_i].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum()) / 2.;
}
}
else {//Original scale for asymptotic covariance matrix
//The last entry of the gradient matrices corresponds to the nugget effect
int ind_grad_nugget = num_cov_par_ - 1;
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][ind_grad_nugget] * B_inv;
B_grad_B_inv_D = B_grad_[cluster_i][ind_grad_nugget] * B_inv * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array()).sum());
FI(0, 0) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
B_grad_B_inv_D = B_grad_B_inv[par_nb] * D;
diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum());
FI(0, par_nb + 1) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
}
}
}
//Remaining covariance parameters (upper triangle including the diagonal)
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_B_inv[par_nb];
for (int par_nb_cross = par_nb; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
B_grad_B_inv_D = B_grad_B_inv[par_nb_cross] * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array() * D_grad_[cluster_i][par_nb_cross].diagonal().array()).sum());
FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
}
}
}//end vecchia_approx_
else {//not vecchia_approx_
T1 psi_inv;
if (use_saved_psi_inv) {
psi_inv = psi_inv_[cluster_i];//reuse the version saved by GetCovParGrad(..., save_psi_inv=true)
}
else {
CalcPsiInv(psi_inv, cluster_i);
}
if (!transf_scale) {
psi_inv /= cov_pars[0];//psi_inv has been calculated with a transformed parametrization, so we need to divide everything by cov_pars[0] to obtain the covariance matrix
}
//Calculate Psi^-1 * derivative(Psi)
std::vector<T1> psi_inv_deriv_psi(num_cov_par_ - 1);
int deriv_par_nb = 0;
for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
psi_inv_deriv_psi[deriv_par_nb] = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
deriv_par_nb++;
}
}
//Calculate Fisher information: FI(k,l) = tr(Psi^-1*dPsi_k * Psi^-1*dPsi_l) / 2
int start_cov_pars = include_error_var ? 1 : 0;
if (include_error_var) {
//First calculate terms for nugget effect / noise variance parameter
if (transf_scale) {//Optimization is done on transformed scale (in particular, log-scale)
//The derivative for the nugget variance on the log scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += psi_inv_deriv_psi[par_nb].diagonal().sum() / 2.;
}
}
else {//Original scale for asymptotic covariance matrix
//The derivative for the nugget variance is the identity matrix, i.e. psi_inv_grad_psi_sigma2 = psi_inv.
FI(0, 0) += ((double)(psi_inv.cwiseProduct(psi_inv)).sum()) / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += ((double)(psi_inv.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
}
}
}
//Remaining covariance parameters (upper triangle including the diagonal)
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
T1 psi_inv_grad_psi_par_nb_T = psi_inv_deriv_psi[par_nb].transpose();
FI(par_nb + start_cov_pars, par_nb + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
for (int par_nb_cross = par_nb + 1; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb_cross])).sum()) / 2.;
}
psi_inv_deriv_psi[par_nb].resize(0, 0);//not needed anymore
psi_inv_grad_psi_par_nb_T.resize(0, 0);
}
}//end not vecchia_approx_
}//end loop over clusters
//Mirror the upper triangle into the lower one to make FI symmetric
FI.triangularView<Eigen::StrictlyLower>() = FI.triangularView<Eigen::StrictlyUpper>().transpose();
//for (int i = 0; i < (int)FI.rows(); ++i) {//For debugging only
//	for (int j = i; j < (int)FI.cols(); ++j) {
//		Log::Info("FI(%d,%d) %f", i, j, FI(i, j));
//	}
//}
}
/*!
* \brief Calculate the standard deviations for the MLE of the covariance parameters as the diagonal of the inverse Fisher information (on the original scale and not the transformed scale used in the optimization)
* \param cov_pars MLE of covariance parameters
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCovPar(const vec_t& cov_pars, vec_t& std_dev) {
SetCovParsComps(cov_pars);
CalcCovFactor(true, false, cov_pars[0], true);
den_mat_t FI;
CalcFisherInformation(cov_pars, FI, false, true, false);
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
* \brief Calculate the standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information
* \param cov_pars MLE of covariance parameters
* \param X Covariate data for linear fixed-effect
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCoef(vec_t& cov_pars, const den_mat_t& X, vec_t& std_dev) {
const int num_coef = (int)std_dev.size();
if (num_coef >= num_data_) {
//More coefficients than observations: standard deviations cannot be estimated
Log::Warning("Sample size too small to calculate standard deviations for coefficients");
for (int i = 0; i < num_coef; ++i) {
std_dev[i] = std::numeric_limits<double>::quiet_NaN();
}
return;
}
SetCovParsComps(cov_pars);
CalcCovFactor(false, true, 1., false);
//Fisher information for beta: X^T * Psi^-1 * X / sigma2
den_mat_t FI((int)X.cols(), (int)X.cols());
CalcXTPsiInvX(X, FI);
FI /= cov_pars[0];
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) for one cluster
* \param cluster_i Cluster index for which prediction are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster
* \param re_group_levels_pred Group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
* \param re_group_rand_coef_data_pred Random coefficient data for grouped REs
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPred(gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const std::vector<std::vector<string_t>>& re_group_levels_pred, const double* re_group_rand_coef_data_pred,
const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
// Vector which contains covariance matrices needed for making predictions in the following order:
// 0. Ztilde*Sigma*Z^T, 1. Zstar*Sigmatilde^T*Z^T, 2. Ztilde*Sigma*Ztilde^T, 3. Ztilde*Sigmatilde*Zstar^T, 4. Zstar*Sigmastar*Zstar^T
std::vector<T1> pred_mats(5);
//Define which covariance matrices are zero ('false') or non-zero ('true')
std::vector<bool> active_mats{ false, false, false, false, false };
if (num_re_group_total_ > 0) {
active_mats[0] = true;
active_mats[2] = true;
active_mats[4] = true;
}
if (num_gp_total_ > 0) {
active_mats[1] = true;
active_mats[4] = true;
}
//Initialize covariance matrices
//Matrices 0-1 are cross-covariances (pred x obs), matrices 2-4 are (pred x pred)
for (int i = 0; i < 2; ++i) {
if (active_mats[i]) {
pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]);
pred_mats[i].setZero();
}
}
if (predict_cov_mat) {
for (int i = 2; i < 5; ++i) {
if (active_mats[i]) {
pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
pred_mats[i].setZero();
}
}
}
//Calculate covariance matrices: each random-effect component adds its contribution to pred_mats
int cn = 0;//component number
if (num_re_group_ > 0) {
//Grouped random effects
for (int j = 0; j < num_re_group_; ++j) {
std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
group_data.push_back(re_group_levels_pred[j][id]);
}
re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat);
cn += 1;
}
if (num_re_group_rand_coef_ > 0) {
//Random coefficient grouped random effects
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data_pred[j * num_data_pred + id]);
group_data.push_back(re_group_levels_pred[ind_effect_group_rand_coef_[j] - 1][id]);//subtract 1 since counting starts at one for this index
}
re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat, rand_coef_data.data());
cn += 1;
}
}
}
if (num_gp_ > 0) {
//Gaussian process
std::shared_ptr<RECompGP<T1>> re_comp_base = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]);
re_comp_base->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat);
cn += 1;
if (num_gp_rand_coef_ > 0) {
std::shared_ptr<RECompGP<T1>> re_comp;
//Random coefficient Gaussian processes
for (int j = 0; j < num_gp_rand_coef_; ++j) {
re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]);
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
re_comp->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat, rand_coef_data.data());
cn += 1;
}
}
}
T1 M_aux(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]);//Ztilde*Sigma*Z^T + Zstar*Sigmatilde^T*Z^T
M_aux.setZero();
for (int i = 0; i < 2; ++i) {
if (active_mats[i]) {
M_aux += pred_mats[i];
}
}
//Predicted (conditional) mean: M_aux * Psi^-1 * y (y_aux_ = Psi^-1 * y, see CalcYAux)
mean_pred_id = M_aux * y_aux_[cluster_i];
if (predict_cov_mat) {
//Prior covariance of the predictions: identity (nugget) + the active (pred x pred) matrices
cov_mat_pred_id.setIdentity();
for (int i = 2; i < 5; ++i) {
if (active_mats[i]) {
cov_mat_pred_id += pred_mats[i];
if (i == 3) {//Ztilde*Sigmatilde*Zstar^T
//This cross term enters twice (once transposed)
cov_mat_pred_id += T1(pred_mats[i].transpose());
}
}
}
//Subtract M_aux * Psi^-1 * M_aux^T to obtain the conditional covariance
if (use_woodbury_identity_) {
T1 ZtM_aux = T1(Zt_[cluster_i] * M_aux.transpose());
cov_mat_pred_id -= (M_aux * T1(M_aux.transpose()) - ZtM_aux.transpose() * chol_facts_solve_[cluster_i].solve(ZtM_aux));
}
else {
cov_mat_pred_id -= (M_aux * (chol_facts_solve_[cluster_i].solve(T1(M_aux.transpose()))));
}
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which prediction are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
//Find nearest neighbors (observed locations first, then prediction locations)
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_pred_cli);
if (CondObsOnly) {
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, num_data_cli - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, -1);
}
//Random coefficients: pre-compute outer products of the coefficient vectors (point i and its neighbors)
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_pred_cli);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_;//First entries are the observed data, then the predicted data
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
if (j == 0) {
//Allocate the per-point vector only once (for the first random coefficient).
//Bug fix: re-allocating it for every j would erase the outer products already
//stored for the previous random coefficients when num_gp_rand_coef_ > 1.
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
}
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);//first entry: coefficient of point i, then its neighbors
coef_vec(0) = rand_coef_data[num_data_cli + i];
if ((num_data_cli + i) > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bpo and Bp
// Bpo: coefficients w.r.t. observed locations, Bp: coefficients w.r.t. prediction locations
std::vector<Triplet_t> entries_init_Bpo, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
entries_init_Bpo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
else {//nearest neighbor belongs to predicted data
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli, 0.));
}
}
}
sp_mat_t Bpo(num_data_pred_cli, num_data_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bpo.setFromTriplets(entries_init_Bpo.begin(), entries_init_Bpo.end());//initialize matrices (in order that the code below can be run in parallel)
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Dp(num_data_pred_cli, num_data_pred_cli);
Dp.setIdentity();//Put 1 on the diagonal (for nugget effect)
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
Dp.coeffRef(i, i) += d_comp_j;
}
//2. remaining terms
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
Bpo.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli) -= A_i(0, inn);
}
}
Dp.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}//end loop over data i
//Predicted mean: solve Bp * mean = -Bpo * y (Bp is the identity if conditioning only on observed data)
mean_pred_id = -Bpo * y_[cluster_i];
if (!CondObsOnly) {
sp_L_solve(Bp.valuePtr(), Bp.innerIndexPtr(), Bp.outerIndexPtr(), num_data_pred_cli, mean_pred_id.data());
}
if (predict_cov_mat) {
if (CondObsOnly) {
cov_mat_pred_id = Dp;//Bp is the identity in this case
}
else {
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t Bp_inv;
eigen_sp_Lower_sp_RHS_cs_solve(Bp, Identity, Bp_inv, true);
cov_mat_pred_id = T1(Bp_inv * Dp * Bp_inv.transpose());
}
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when prediction locations appear first in the ordering
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the every cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaPredictedFirstOrder(gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
//Prediction locations come FIRST in the Vecchia ordering: coords_all = [pred, obs].
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_tot, dim_gp_coords_);
coords_all << gp_coords_mat_pred, gp_coords_mat_obs;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_tot);
find_nearest_neighbors_Veccia_fast(coords_all, num_data_tot, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
//Prepare data for random coefficients
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_tot);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data(num_data_tot);//First entries are the predicted data, then the observed data
//NOTE(review): the '#pragma omp for' directives below are not enclosed in a
//'#pragma omp parallel' region, so they execute serially here - confirm whether
//a surrounding parallel region was intended.
#pragma omp for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
rand_coef_data[i] = gp_rand_coef_data_pred[j * num_data_pred + data_indices_per_cluster_pred[cluster_i][i]];
}
#pragma omp for schedule(static)
for (int i = 0; i < num_data_cli; ++i) {
rand_coef_data[num_data_pred_cli + i] = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_[i];
}
//re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_
//for (int i = 0; i < rand_coef_data.size(); ++i) {
//	Log::Info("rand_coef_data[%d]: %f", i, rand_coef_data[i]);
//}
//NOTE(review): z_outer_z_obs_neighbors_cluster_i[i] is re-assigned a fresh vector
//on every iteration of the outer loop over j, which discards the entries written
//for previous coefficients when num_gp_rand_coef_ > 1 - TODO confirm this is intended.
#pragma omp for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bo, Bop, and Bp
std::vector<Triplet_t> entries_init_Bo, entries_init_Bop, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
for (int i = 0; i < num_data_cli; ++i) {
entries_init_Bo.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i + num_data_pred_cli].size(); ++inn) {
if (nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
entries_init_Bop.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn], 0.));
}
else {//nearest neighbor belongs to observed data
entries_init_Bo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] - num_data_pred_cli, 0.));
}
}
}
//Bo: obs x obs, Bop: obs x pred, Bp: pred x pred sparse triangular factors
sp_mat_t Bo(num_data_cli, num_data_cli);
sp_mat_t Bop(num_data_cli, num_data_pred_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bo.setFromTriplets(entries_init_Bo.begin(), entries_init_Bo.end());//initialize matrices (in order that the code below can be run in parallel)
Bop.setFromTriplets(entries_init_Bop.begin(), entries_init_Bop.end());
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Do_inv(num_data_cli, num_data_cli);
sp_mat_t Dp_inv(num_data_pred_cli, num_data_pred_cli);
Do_inv.setIdentity();//Put 1 on the diagonal (for nugget effect)
Dp_inv.setIdentity();
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
if (i > 0) {
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) += d_comp_j;
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) += d_comp_j;
}
}
//2. remaining terms
if (i > 0) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (i < num_data_pred_cli) {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
if (nearest_neighbors_cluster_i[i][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
Bop.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bo.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn] - num_data_pred_cli) -= A_i(0, inn);
}
}
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}
//Invert the diagonal entry in place: Dp_inv/Do_inv now hold 1/D on the diagonal
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) = 1 / Dp_inv.coeffRef(i, i);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) = 1 / Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli);
}
}//end loop over data i
//Conditional precision of the predicted locations given the observed data
sp_mat_t cond_prec = Bp.transpose() * Dp_inv * Bp + Bop.transpose() * Do_inv * Bop;
chol_sp_mat_t CholFact;
CholFact.compute(cond_prec);
if (predict_cov_mat) {
//Explicitly invert the Cholesky factor to obtain the full predictive covariance
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t cond_prec_chol = CholFact.matrixL();
sp_mat_t cond_prec_chol_inv;
eigen_sp_Lower_sp_RHS_cs_solve(cond_prec_chol, Identity, cond_prec_chol_inv, true);
cov_mat_pred_id = T1(cond_prec_chol_inv.transpose() * cond_prec_chol_inv);
mean_pred_id = -cov_mat_pred_id * Bop.transpose() * Do_inv * Bo * y_[cluster_i];
}
else {
//Only the mean is needed: a single sparse triangular solve suffices
mean_pred_id = -CholFact.solve(Bop.transpose() * Do_inv * Bo * y_[cluster_i]);
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the latent process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaLatentObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i,
std::map<gp_id_t, int>& num_data_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
//Vecchia approximation applied to the LATENT process; observed locations come
//first in the ordering: coords_all = [obs, pred].
if (num_gp_rand_coef_ > 0) {
Log::Fatal("The Vecchia approximation for the latent process is currently not implemented when having random coefficients");
}
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
//Determine number of unique observation locations
std::vector<int> uniques;//unique points
std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates
DetermineUniqueDuplicateCoords(gp_coords_mat_obs, num_data_cli, uniques, unique_idx);
int num_coord_unique_obs = (int)uniques.size();
//Determine unique locations (observed and predicted)
DetermineUniqueDuplicateCoords(coords_all, num_data_tot, uniques, unique_idx);
int num_coord_unique = (int)uniques.size();
den_mat_t coords_all_unique;
if ((int)uniques.size() == num_data_tot) {//no multiple observations at the same locations -> no incidence matrix needed
coords_all_unique = coords_all;
}
else {
coords_all_unique = coords_all(uniques, Eigen::all);
}
//Determine incidence matrices (Z_o maps unique locations -> observed data rows,
//Z_p maps unique locations -> prediction rows)
sp_mat_t Z_o = sp_mat_t(num_data_cli, uniques.size());
sp_mat_t Z_p = sp_mat_t(num_data_pred_cli, uniques.size());
for (int i = 0; i < num_data_tot; ++i) {
if (i < num_data_cli) {
Z_o.insert(i, unique_idx[i]) = 1.;
}
else {
Z_p.insert(i - num_data_cli, unique_idx[i]) = 1.;
}
}
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_coord_unique);
if (CondObsOnly) {//find neighbors among the observed locations only
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, num_coord_unique_obs - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
}
// Determine Triplet for initializing B
std::vector<Triplet_t> entries_init_B;
for (int i = 0; i < num_coord_unique; ++i) {
entries_init_B.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_B.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
sp_mat_t B(num_coord_unique, num_coord_unique);
B.setFromTriplets(entries_init_B.begin(), entries_init_B.end());//initialize matrices (in order that the code below can be run in parallel)
sp_mat_t D(num_coord_unique, num_coord_unique);
D.setIdentity();
D.diagonal().array() = 0.;
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_coord_unique; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
if (i > 0) {
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
D.coeffRef(i, i) = re_comps_[cluster_i][ind_intercept_gp_]->cov_pars_[0];
//2. remaining terms
//No nugget is added to cov_mat_between_neighbors here (latent process,
//in contrast to the observable-process variants above)
if (i > 0) {
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
B.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
D.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end loop over data i
//Calculate D_inv and B_inv in order to calculate Sigma and Sigma^-1
sp_mat_t D_inv(num_coord_unique, num_coord_unique);
D_inv.setIdentity();
D_inv.diagonal().array() = D.diagonal().array().pow(-1);
sp_mat_t Identity_all(num_coord_unique, num_coord_unique);
Identity_all.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_cs_solve(B, Identity_all, B_inv, true);
//Calculate inverse of covariance matrix for observed data using the Woodbury identity
sp_mat_t Z_o_T = Z_o.transpose();
sp_mat_t M_aux_Woodbury = B.transpose() * D_inv * B + Z_o_T * Z_o;
chol_sp_mat_t CholFac_M_aux_Woodbury;
CholFac_M_aux_Woodbury.compute(M_aux_Woodbury);
if (predict_cov_mat) {
//Using Eigen's solver
sp_mat_t M_aux_Woodbury2 = CholFac_M_aux_Woodbury.solve(Z_o_T);
sp_mat_t Identity_obs(num_data_cli, num_data_cli);
Identity_obs.setIdentity();
sp_mat_t ZoSigmaZoT_plusI_Inv = -Z_o * M_aux_Woodbury2 + Identity_obs;
sp_mat_t ZpSigmaZoT = Z_p * B_inv * D * B_inv.transpose() * Z_o_T;
sp_mat_t M_aux = ZpSigmaZoT * ZoSigmaZoT_plusI_Inv;
mean_pred_id = M_aux * y_[cluster_i];
sp_mat_t Identity_pred(num_data_pred_cli, num_data_pred_cli);
Identity_pred.setIdentity();
cov_mat_pred_id = T1(Z_p * B_inv * D * B_inv.transpose() * Z_p.transpose() + Identity_pred - M_aux * ZpSigmaZoT.transpose());
}
else {
//Mean only: apply the Woodbury identity without forming any inverse explicitly
vec_t resp_aux = Z_o_T * y_[cluster_i];
vec_t resp_aux2 = CholFac_M_aux_Woodbury.solve(resp_aux);
resp_aux = y_[cluster_i] - Z_o * resp_aux2;
mean_pred_id = Z_p * B_inv * D * B_inv.transpose() * Z_o_T * resp_aux;
}
}
friend class REModel;
};
} // namespace GPBoost
#endif // GPB_RE_MODEL_TEMPLATE_H_
|
c_jacobi01.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
Copyright (c) 2004, OmpSCR Group
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of La Laguna nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
FILE: c_jacobi01.c
VERSION: 1.1
DATE: Oct 2004
AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003
anmey@rz.rwth-aachen.de
http://www.rwth-aachen.de/People/D.an.Mey.html
COMMENTS TO: ompscr@etsii.ull.es
DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation :
(d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method.
COMMENTS: OpenMP version 1: two parallel regions with one parallel loop each, the naive approach.
Directives are used in this code to achieve parallelism.
All do loops are parallelized with default 'static' scheduling.
REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html
BASIC PRAGMAS: parallel for
USAGE: ./c_jacobi01.par 5000 5000 0.8 1.0 1000
INPUT: n - grid dimension in x direction
m - grid dimension in y direction
alpha - Helmholtz constant (always greater than 0.0)
tol - error tolerance for iterative solver
relax - Successive over-relaxation parameter
mits - Maximum iterations for iterative solver
OUTPUT: Residual and error
u(n,m) - Dependent variable (solutions)
f(n,m) - Right hand side function
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
//#include "OmpSCR.h"
#define U(i,j) u[(i)*n+(j)]
#define F(i,j) f[(i)*n+(j)]
#define NUM_ARGS 6
#define NUM_TIMERS 1
#define NIN 4
#define MIN 4
#define ALPHA 0.1
#define TOL 0.1
#define RELAX 2
#define MITS 2
/* Problem configuration shared between main() and the helper routines:
 * n, m  - grid dimensions in x / y direction
 * mits  - maximum number of solver iterations
 * tol   - convergence tolerance, relax - over-relaxation factor,
 * alpha - Helmholtz constant */
int n, m, mits;
double tol, relax, alpha;
/* Jacobi iterative solver for the Helmholtz discretization (defined below). */
void jacobi (int n, int m, double dx, double dy,
double alpha, double omega,
double *u, double *f,
double tol, int maxit );
/******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize(
    int n,
    int m,
    double alpha,
    double *dx,
    double *dy,
    double *u,
    double *f)
{
    /* Fills u with the zero initial guess and f with the right-hand side of
     * the Helmholtz equation for the exact solution u(x,y)=(1-x^2)(1-y^2).
     * Outputs: *dx, *dy receive the grid spacings; u and f are n*m arrays
     * indexed as [j*n + i] (j = y-row, i = x-column).
     * BUG FIX: xx and yy were previously declared int, truncating the grid
     * coordinates to whole numbers; error_check() computes the same
     * coordinates in double, so the two were inconsistent. */
    *dx = 2.0 / (n - 1);
    *dy = 2.0 / (m - 1);
    for (int j = 0; j < m; j++) {
        for (int i = 0; i < n; i++) {
            double xx = -1.0 + *dx * (i - 1);  /* mirrors error_check(); NOTE(review):
                                                  (i-1) looks like a 1-based leftover - confirm */
            double yy = -1.0 + *dy * (j - 1);
            u[j * n + i] = 0.0;
            f[j * n + i] = -alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
                - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
        }
    }
}
/************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check(
int n,
int m,
double alpha,
double dx,
double dy,
double *u,
double *f)
{
int i,j;
double xx, yy, temp, error;
dx = 2.0 / (n-1);
dy = 2.0 / (n-2);
error = 0.0;
for (j=0; j<m; j++){
for (i=0; i<n; i++){
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy);
error += temp*temp;
}
}
error = sqrt(error)/(n*m);
printf("Solution Error : %g\n", error);
}
int main(int argc, char **argv)
{
    /* Driver: builds a small Helmholtz problem from the compiled-in defaults
     * (NIN/MIN/ALPHA/RELAX/TOL/MITS; the OmpSCR harness that used to parse
     * argv is commented out upstream), runs the Jacobi solver, and reports
     * timing, MFlops and the solution error. */
    double *grid, *rhs;
    double hx, hy;
    double elapsed, mflop_rate;
    int num_threads;

    (void)argc;
    (void)argv;
    num_threads = 1; /* omp_get_num_threads() when built with the OmpSCR harness */
    (void)num_threads;
    n = NIN;
    m = MIN;
    alpha = ALPHA;
    relax = RELAX;
    tol = TOL;
    mits = MITS;
    printf("-> %d, %d, %g, %g, %g, %d\n",
        n, m, alpha, relax, tol, mits);
    /* Allocate and initialize the solution grid and right-hand side. */
    grid = malloc(n * m * sizeof(double));
    rhs = malloc(n * m * sizeof(double));
    initialize(n, m, alpha, &hx, &hy, grid, rhs);
    /* Solve the Helmholtz equation. */
    jacobi(n, m, hx, hy, alpha, relax, grid, rhs, tol, mits);
    elapsed = 1; /* OSCR_timer_read(0) in the harness; fixed to 1 here */
    printf(" elapsed time : %12.6f\n", elapsed);
    mflop_rate = (0.000001 * mits * (m - 2) * (n - 2) * 13) / elapsed;
    printf(" MFlops : %12.6g (%d, %d, %d, %g)\n", mflop_rate, mits, m, n, elapsed);
    error_check(n, m, alpha, hx, hy, grid, rhs);
    return 0;
}
/*
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves Poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************
*/
void jacobi ( const int n, const int m, double dx, double dy, double alpha,
double omega, double *u, double *f, double tol, int maxit )
{
int i,j,k;
double error, resid, ax, ay, b;
double *uold;
/* wegen Array-Kompatibilitaet, werden die Zeilen und Spalten (im Kopf)
getauscht, zB uold[spalten_num][zeilen_num]; bzw. wir tuen so, als ob wir das
gespiegelte Problem loesen wollen */
uold = (double *)malloc(sizeof(double) * n *m);
ax = 1.0/(dx * dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y_direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
error = 10.0 * tol;
k = 1;
while (k <= maxit && error > tol) {
error = 0.0;
/* copy new solution into old */
#pragma omp parallel for private(i)
for (j=0; j<m; j++)
for (i=0; i<n; i++)
uold[i + m*j] = u[i + m*j];
/* compute stencil, residual and update */
#pragma omp parallel for reduction(+:error) private(i,resid)
for (j=1; j<m-1; j++)
for (i=1; i<n-1; i++){
resid =(
ax * (uold[i-1 + m*j] + uold[i+1 + m*j])
+ ay * (uold[i + m*(j-1)] + uold[i + m*(j+1)])
+ b * uold[i + m*j] - f[i + m*j]
) / b;
/* update solution */
u[i + m*j] = uold[i + m*j] - omega * resid;
/* accumulate residual error */
error =error + resid*resid;
}
/* error check */
k++;
error = sqrt(error) /(n*m);
} /* while */
printf("Total Number of Iterations %d\n", k);
printf("Residual %.15f\n\n", error);
free(uold);
}
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
/* Per-transform bookkeeping for the DFT routines in this file. */
typedef struct _FourierInfo
{
/* Image channel the transform operates on. */
ChannelType
channel;
/* NOTE(review): presumably selects magnitude/phase output when MagickTrue
   vs real/imaginary when MagickFalse - confirm against usage below. */
MagickBooleanType
modulus;
/* Transform dimensions in pixels. */
size_t
width,
height;
/* NOTE(review): looks like the index of the center (DC) element - confirm. */
ssize_t
center;
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,
% const ComplexOperator op,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"
CacheView
*Ai_view,
*Ar_view,
*Bi_view,
*Br_view,
*Ci_view,
*Cr_view;
const char
*artifact;
const Image
*Ai_image,
*Ar_image,
*Bi_image,
*Br_image;
double
snr;
Image
*Ci_image,
*complex_images,
*Cr_image,
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if (images->next == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",images->filename);
return((Image *) NULL);
}
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
image->storage_class=DirectClass;
image->depth=32UL;
complex_images=NewImageList();
AppendImageToList(&complex_images,image);
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
{
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
image->storage_class=DirectClass;
image->depth=32UL;
AppendImageToList(&complex_images,image);
/*
Apply complex mathematics to image pixels.
*/
artifact=GetImageArtifact(image,"complex:snr");
snr=0.0;
if (artifact != (const char *) NULL)
snr=StringToDouble(artifact,(char **) NULL);
Ar_image=images;
Ai_image=images->next;
Br_image=images;
Bi_image=images->next;
if ((images->next->next != (Image *) NULL) &&
(images->next->next->next != (Image *) NULL))
{
Br_image=images->next->next;
Bi_image=images->next->next->next;
}
Cr_image=complex_images;
Ci_image=complex_images->next;
Ar_view=AcquireVirtualCacheView(Ar_image,exception);
Ai_view=AcquireVirtualCacheView(Ai_image,exception);
Br_view=AcquireVirtualCacheView(Br_image,exception);
Bi_view=AcquireVirtualCacheView(Bi_image,exception);
Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(images,complex_images,images->rows,1)
#endif
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict Ai,
*restrict Ar,
*restrict Bi,
*restrict Br;
register PixelPacket
*restrict Ci,
*restrict Cr;
register ssize_t
x;
if (status == MagickFalse)
continue;
Ar=GetCacheViewVirtualPixels(Ar_view,0,y,images->columns,1,exception);
Ai=GetCacheViewVirtualPixels(Ai_view,0,y,images->columns,1,exception);
Br=GetCacheViewVirtualPixels(Br_view,0,y,images->columns,1,exception);
Bi=GetCacheViewVirtualPixels(Bi_view,0,y,images->columns,1,exception);
Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,images->columns,1,exception);
Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,images->columns,1,exception);
if ((Ar == (const PixelPacket *) NULL) ||
(Ai == (const PixelPacket *) NULL) ||
(Br == (const PixelPacket *) NULL) ||
(Bi == (const PixelPacket *) NULL) ||
(Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) images->columns; x++)
{
switch (op)
{
case AddComplexOperator:
{
Cr->red=Ar->red+Br->red;
Ci->red=Ai->red+Bi->red;
Cr->green=Ar->green+Br->green;
Ci->green=Ai->green+Bi->green;
Cr->blue=Ar->blue+Br->blue;
Ci->blue=Ai->blue+Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity+Br->opacity;
Ci->opacity=Ai->opacity+Bi->opacity;
}
break;
}
case ConjugateComplexOperator:
default:
{
Cr->red=Ar->red;
Ci->red=(-Bi->red);
Cr->green=Ar->green;
Ci->green=(-Bi->green);
Cr->blue=Ar->blue;
Ci->blue=(-Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity;
Ci->opacity=(-Bi->opacity);
}
break;
}
case DivideComplexOperator:
{
double
gamma;
gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr);
Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red);
Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red);
gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+
snr);
Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green);
Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green);
gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr);
Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue);
Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity*
Bi->opacity+snr);
Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity*
Bi->opacity);
Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity*
Bi->opacity);
}
break;
}
case MagnitudePhaseComplexOperator:
{
Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red);
Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5;
Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green);
Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5;
Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue);
Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5;
if (images->matte != MagickFalse)
{
Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity);
Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5;
}
break;
}
case MultiplyComplexOperator:
{
Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red);
Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red);
Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green);
Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green);
Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue);
Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity*
Bi->opacity);
Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity*
Bi->opacity);
}
break;
}
case RealImaginaryComplexOperator:
{
Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
}
break;
}
case SubtractComplexOperator:
{
Cr->red=Ar->red-Br->red;
Ci->red=Ai->red-Bi->red;
Cr->green=Ar->green-Br->green;
Ci->green=Ai->green-Bi->green;
Cr->blue=Ar->blue-Br->blue;
Ci->blue=Ai->blue-Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity-Br->opacity;
Ci->opacity=Ai->opacity-Bi->opacity;
}
break;
}
}
Ar++;
Ai++;
Br++;
Bi++;
Cr++;
Ci++;
}
if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ComplexImages)
#endif
proceed=SetImageProgress(images,ComplexImageTag,progress++,
images->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
Cr_view=DestroyCacheView(Cr_view);
Ci_view=DestroyCacheView(Ci_view);
Br_view=DestroyCacheView(Br_view);
Bi_view=DestroyCacheView(Bi_view);
Ar_view=DestroyCacheView(Ar_view);
Ai_view=DestroyCacheView(Ai_view);
if (status == MagickFalse)
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwardFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return the transform as a magnitude / phase pair,
% otherwise as a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *scratch_pixels;

  MemoryInfo
    *scratch_info;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Cyclically shift the pixel plane by (x_offset,y_offset) so the zero
    frequency (DC, average color) moves from (0,0) to (width/2,height/2).
    A scratch plane of the same size receives the rolled data, which is then
    copied back over the caller's buffer.
  */
  scratch_info=AcquireVirtualMemory(height,width*sizeof(*scratch_pixels));
  if (scratch_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  scratch_pixels=(double *) GetVirtualMemoryBlob(scratch_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /*
      Wrap the destination row index back into [0,height).
    */
    v=y+y_offset;
    if (y_offset < 0L)
      {
        if (v < 0L)
          v+=(ssize_t) height;
      }
    else
      if (v > ((ssize_t) height-1L))
        v-=(ssize_t) height;
    for (x=0L; x < (ssize_t) width; x++)
    {
      /*
        Wrap the destination column index back into [0,width).
      */
      u=x+x_offset;
      if (x_offset < 0L)
        {
          if (u < 0L)
            u+=(ssize_t) width;
        }
      else
        if (u > ((ssize_t) width-1L))
          u-=(ssize_t) width;
      scratch_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  (void) CopyMagickMemory(roll_pixels,scratch_pixels,height*width*
    sizeof(*scratch_pixels));
  scratch_info=RelinquishVirtualMemory(scratch_info);
  return(MagickTrue);
}
/*
  ForwardQuadrantSwap() rearranges the half-plane FFT output (width/2+1
  columns per row, as produced by a real-to-complex transform) into a full
  width x height plane with the zero frequency centered, mirroring the
  missing half via conjugate symmetry of a real-input transform.
*/
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) floor((double) width/2L)+1L;  /* columns in the half plane */
  /* Roll rows by height/2 so DC moves to the vertical center. */
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Copy the stored half plane into the right half of the output. */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L-1L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /* Mirror it (flipped in both axes) into the left half; row 0 is handled
     separately below since it mirrors onto itself. */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L-1L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  /* Reflect the first row about the center column. */
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[-x+width/2L-1L]=forward_pixels[x+width/2L+1L];
  return(MagickTrue);
}
/*
  CorrectPhaseLHS() negates the left half (first width/2 columns) of every
  row of the phase plane.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  register ssize_t
    x;

  ssize_t
    y;

  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      fourier_pixels[y*width+x]=(-fourier_pixels[y*width+x]);
}
/*
  ForwardFourier() writes the computed magnitude and phase (or real and
  imaginary) planes into the two-image list supplied by the caller: the
  first image receives the magnitude data, the second the phase data.  The
  planes are first quadrant-swapped so the zero frequency is centered, and,
  in modulus mode, the phase is rescaled from [-pi,pi] to [0,1].
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  /* The destination pair: first image = magnitude, second = phase. */
  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      /* Release whichever allocation succeeded before bailing out. */
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) ResetMagickMemory(magnitude_pixels,0,fourier_info->height*
    fourier_info->width*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) ResetMagickMemory(phase_pixels,0,fourier_info->height*
    fourier_info->width*sizeof(*phase_pixels));
  /* Expand the half-plane buffers to full centered planes. */
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /* Map phase from [-pi,pi] into the displayable [0,1] range. */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /* NOTE(review): column count passed here is fourier_info->height; this
       works only because width == height in this pipeline — confirm. */
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* Store the magnitude in the single channel being transformed. */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /* NOTE(review): same height-as-columns pattern as above — confirm. */
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* Store the phase in the single channel being transformed. */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
/*
  ForwardFourierTransform() reads one channel of the source image into a
  real-valued plane, runs FFTW's real-to-complex 2-D transform on it, and
  splits the half-plane complex result into magnitude/phase (modulus mode)
  or real/imaginary arrays supplied by the caller.
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /* Zero-fill so padding beyond the image bounds transforms as black. */
  ResetMagickMemory(source_pixels,0,fourier_info->height*fourier_info->width*
    sizeof(*source_pixels));
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* Normalize the selected channel to [0,1]. */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          source_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Half-plane complex output: height rows of center (= width/2+1) bins. */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->center*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  /* FFTW planning is not thread-safe, hence the critical section.
     NOTE(review): args are (width,height); valid only because the plane is
     square here — confirm. */
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute(fftw_r2c_plan);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize inverse transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() transforms a single channel of the image:
  it derives the (even, square) FFT geometry, allocates the half-plane
  magnitude and phase buffers, runs the forward transform, and writes the
  result into the magnitude/phase image pair.
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  size_t
    extent;

  /*
    Pad a non-square or odd-sized image up to the smallest even square that
    contains it.
  */
  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  /* center = width/2+1: bins per row of the real-to-complex half plane. */
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      /* Fix: was `== NULL`, which leaked the live allocation and handed
         NULL to RelinquishVirtualMemory; release only when non-NULL. */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns a two-image list (magnitude/phase
  or real/imaginary) holding the DFT of the source image, or an empty list
  on failure.  Without FFTW support it raises a missing-delegate warning
  and returns the empty list.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      extent,
      height,
      width;

    /* Same even-square padding rule as ForwardFourierTransformChannel. */
    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        extent=image->columns < image->rows ? image->rows : image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image
          *phase_image;

        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType
              is_gray,
              status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsGrayImage(image,exception);
            /* Transform each channel in its own OpenMP section; a gray
               image needs only one transform. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayChannels,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,RedChannel,
                    modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlueChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->matte != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    OpacityChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    IndexChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() is the inverse of ForwardQuadrantSwap(): it folds
  the full, centered width x height plane back into the height x center
  (center = width/2+1) half plane expected by the complex-to-real FFT,
  then rolls the rows back by -height/2 to restore the DC to (0,0).
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) floor((double) width/2L)+1L;
  /* Reverse the mirrored copy: read rows 1..height-1 flipped in both axes. */
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  /* Column width/2 of the source supplies the first bin of each row. */
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  /* First destination row comes from the reflected first source row. */
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads the magnitude (or real) and phase (or imaginary)
  images back into double planes, undoes the quadrant swap and phase
  scaling applied by the forward transform, and merges the two planes into
  the complex half-plane buffer consumed by the inverse FFT.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*phase_pixels));
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->center*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      /* Release whichever of the three allocations succeeded. */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* NOTE(review): Authentic index queue requested on a *virtual* cache
       view; GetCacheViewVirtualIndexQueue looks intended — confirm. */
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* Normalize the selected channel to [0,1]. */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /* Fold the centered plane back into the half-plane layout. */
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) CopyMagickMemory(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* NOTE(review): same Authentic-on-virtual-view pattern — confirm. */
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      /* Undo the forward transform's [0,1] phase scaling: back to [-pi,pi]. */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) CopyMagickMemory(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    /* Polar form: re = m*cos(phi), im = m*sin(phi). */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    /* Cartesian form: planes already hold real and imaginary parts. */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}
/*
  InverseFourierTransform() runs FFTW's complex-to-real 2-D transform on
  the merged half-plane buffer and writes the resulting spatial-domain
  values into the selected channel of the destination image, clipping to
  the image's actual columns/rows (which may be smaller than the padded
  FFT plane).
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    *source_pixels;

  const char
    *value;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  /* NOTE(review): value may be NULL when the artifact is unset; this relies
     on LocaleCompare tolerating a NULL argument — confirm. */
  if (LocaleCompare(value,"inverse") == 0)
    {
      double
        gamma;

      /*
        Normalize Fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  /* FFTW planning is not thread-safe, hence the critical section. */
  {
    fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
      fourier_pixels,source_pixels,FFTW_ESTIMATE);
    fftw_execute(fftw_c2r_plan);
    fftw_destroy_plan(fftw_c2r_plan);
  }
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /* The FFT plane may be padded; stop at the image's real extent. */
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* Skip padded columns; i still advances so the source row stays
         aligned with the FFT plane width. */
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedChannel:
          default:
          {
            SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case GreenChannel:
          {
            SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case BlueChannel:
          {
            SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case OpacityChannel:
          {
            SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case IndexChannel:
          {
            SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
              source_pixels[i]));
            break;
          }
          case GrayChannels:
          {
            SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
        }
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one channel of the spatial
  image from a magnitude/phase (or real/imaginary) image pair: it derives
  the padded FFT geometry, allocates the complex half-plane buffer, merges
  the pair into it, and runs the inverse transform.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *fourier_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *fourier_memory;

  size_t
    extent;

  /*
    Pad a non-square or odd-sized plane up to the smallest even square.
  */
  fourier_info.width=magnitude_image->columns;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      extent=magnitude_image->columns;
      if (magnitude_image->rows > extent)
        extent=magnitude_image->rows;
      fourier_info.width=(extent % 2) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  /* center = width/2+1: bins per row of the complex half plane. */
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  fourier_memory=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*fourier_pixels));
  if (fourier_memory == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  fourier_pixels=(fftw_complex *) GetVirtualMemoryBlob(fourier_memory);
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    fourier_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,fourier_pixels,fourier_image,
      exception);
  fourier_memory=RelinquishVirtualMemory(fourier_memory);
  return(status);
}
#endif
/*
  InverseFourierTransformImage() reconstructs a spatial-domain image from a
  magnitude image and a phase image.  Each channel is transformed
  independently (in parallel OpenMP sections when available); gray input
  pairs use a single gray-channel transform.  Returns the new image, or
  NULL on error (the exception is updated).  Without FFTW support a
  missing-delegate warning is raised and NULL is returned.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
  const Image *phase_image,const MagickBooleanType modulus,
  ExceptionInfo *exception)
{
  Image
    *fourier_image;

  assert(magnitude_image != (Image *) NULL);
  assert(magnitude_image->signature == MagickSignature);
  if (magnitude_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      magnitude_image->filename);
  /*
    Both magnitude and phase images are required for reconstruction.
  */
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",magnitude_image->filename);
      return((Image *) NULL);
    }
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  /*
    Built without the FFTW delegate: warn and return no image.
  */
  fourier_image=(Image *) NULL;
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    magnitude_image->filename);
#else
  {
    fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
      magnitude_image->rows,MagickTrue,exception);
    if (fourier_image != (Image *) NULL)
      {
        MagickBooleanType
          is_gray,
          status;

        status=MagickTrue;
        /*
          Treat the pair as gray only when both inputs are gray.
        */
        is_gray=IsGrayImage(magnitude_image,exception);
        if (is_gray != MagickFalse)
          is_gray=IsGrayImage(phase_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp parallel sections
#endif
        {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* gray channel for gray inputs, otherwise the red channel */
            if (is_gray != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GrayChannels,modulus,fourier_image,exception);
            else
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,RedChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* green channel (color images only) */
            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GreenChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* blue channel (color images only) */
            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BlueChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* opacity channel only when the image carries one */
            thread_status=MagickTrue;
            if (magnitude_image->matte != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,OpacityChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            /* index (black) channel for CMYK images */
            thread_status=MagickTrue;
            if (magnitude_image->colorspace == CMYKColorspace)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,IndexChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
        }
        /* any failed channel aborts the whole reconstruction */
        if (status == MagickFalse)
          fourier_image=DestroyImage(fourier_image);
      }
    fftw_cleanup();
  }
#endif
  return(fourier_image);
}
|
diffusion.c | /*
 * ======================= Diffusion ====================
 * Apply explicit sub-grid diffusion to the wind components (u, v, w)
 * and to theta for one time step.
 * ATMS 502 / CSE 566, Spring 2016
 *
 * Arguments:
 *
 *	theta_d2,theta_d1    real arrays   theta at next/current step
 *	u3,u1,v3,v1,w3,w1    real arrays   wind components at next/current step
 *	dx,dy,dz             real          grid spacings
 *	tstep,dt             real          time step lengths used in the updates
 *	i1,i2,j1,j2,k1,k2    integers      indices bounding array data
 *	nx,ny,nz             integers      number of grid points
 *	BC_WIDTH             integer       boundary-condition ghost width
 *	K_u,K_v,K_w,K_theta  real          diffusion coefficients
*/
#include <stdio.h>
#include <stdlib.h>
void diffusion(theta_d2,theta_d1,u3,u1,v3,v1,w3,w1,dx,dy,dz,tstep,dt,i1,i2,j1,j2,k1,k2,nx,ny,nz,BC_WIDTH,K_u,K_v,K_w,K_theta)
int i1,i2,j1,j2,k1,k2,nx,ny,nz,BC_WIDTH;
float theta_d2[][ny][nz],theta_d1[][ny][nz],u3[][ny][nz],w3[][ny][k2+2],v3[][j2+2][nz],u1[][ny][nz],w1[][ny][k2+2],v1[][j2+2][nz],dx,dy,dz,tstep,dt;
float K_u,K_v,K_w,K_theta;
{
int i,j,k;
#pragma omp parallel for shared(u3,u1) private(i,j,k)
for (i=i1+1;i<=i2;i++)
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
u3[i][j][k] = u3[i][j][k] + tstep*K_u*(u1[i+1][j][k]-2*u1[i][j][k]+u1[i-1][j][k])/dx/dx;
u3[i][j][k] = u3[i][j][k] + tstep*K_u*(u1[i][j+1][k]-2*u1[i][j][k]+u1[i][j-1][k])/dy/dy;
u3[i][j][k] = u3[i][j][k] + tstep*K_u*(u1[i][j][k+1]-2*u1[i][j][k]+u1[i][j][k-1])/dz/dz;
}
#pragma omp parallel for shared(v3,v1) private(i,j,k)
for (i=i1;i<=i2;i++)
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
v3[i][j][k] = v3[i][j][k] + tstep*K_v*(v1[i+1][j][k]-2*v1[i][j][k]+v1[i-1][j][k])/dx/dx;
v3[i][j][k] = v3[i][j][k] + tstep*K_v*(v1[i][j+1][k]-2*v1[i][j][k]+v1[i][j-1][k])/dy/dy;
v3[i][j][k] = v3[i][j][k] + tstep*K_v*(v1[i][j][k+1]-2*v1[i][j][k]+v1[i][j][k-1])/dz/dz;
}
#pragma omp parallel for shared(w3,w1) private(i,j,k)
for (i=i1;i<=i2;i++)
for (j=j1;j<=j2;j++)
for (k=k1+1;k<=k2;k++)
{
w3[i][j][k] = w3[i][j][k] + tstep*K_w*(w1[i+1][j][k]-2*w1[i][j][k]+w1[i-1][j][k])/dx/dx;
w3[i][j][k] = w3[i][j][k] + tstep*K_w*(w1[i][j+1][k]-2*w1[i][j][k]+w1[i][j-1][k])/dy/dy;
w3[i][j][k] = w3[i][j][k] + tstep*K_w*(w1[i][j][k+1]-2*w1[i][j][k]+w1[i][j][k-1])/dz/dz;
}
#pragma omp parallel for shared(theta_d2,theta_d1) private(i,j,k)
for (i=i1;i<=i2;i++) /* theta */
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
theta_d2[i][j][k] = theta_d2[i][j][k] + dt*K_theta*(theta_d1[i+1][j][k]-2*theta_d1[i][j][k]+theta_d1[i-1][j][k])/dx/dx;
theta_d2[i][j][k] = theta_d2[i][j][k] + dt*K_theta*(theta_d1[i][j+1][k]-2*theta_d1[i][j][k]+theta_d1[i][j-1][k])/dy/dy;
theta_d2[i][j][k] = theta_d2[i][j][k] + dt*K_theta*(theta_d1[i][j][k+1]-2*theta_d1[i][j][k]+theta_d1[i][j][k-1])/dz/dz;
}
return;
}
|
weno5js_impl_c_.c | #include <stdlib.h>
#include <math.h>
/* Regularization epsilon for the nonlinear weights and the per-call vector
   length, both configured once via weno5js_init(). */
static double eps;
static int size;

/* Interpolation coefficients of the three 3-point candidate stencils of the
   fifth-order WENO-JS reconstruction (stencil 0 uses {a,b,c}, stencil 1 uses
   {b,c,d}, stencil 2 uses {c,d,e}). */
static double c00 = 1.0/3.0;
static double c01 = -7.0/6.0;
static double c02 = 11.0/6.0;
static double c10 = -1.0/6.0;
static double c11 = 5.0/6.0;
static double c12 = 1.0/3.0;
static double c20 = 1.0/3.0;
static double c21 = 5.0/6.0;
static double c22 = -1.0/6.0;
/* Leading constant (13/12) of the Jiang-Shu smoothness indicators. */
static double isc = 13.0 / 12.0;

/*
 * Configure the module: aeps is the smoothness-indicator regularization
 * (avoids division by zero on smooth data), asize is the number of points
 * each subsequent weno5js_interpolate() call processes.
 */
void weno5js_init(double aeps, int asize)
{
    eps = aeps;
    size = asize;
}

/*
 * Fifth-order WENO-JS reconstruction.  a..e are the five stencil values per
 * point (a = i-2, ..., e = i+2); out receives the weighted combination of
 * the three third-order candidate interpolants.  The nonlinear weights are
 * proportional to the linear weights (1/10, 6/10, 3/10, normalized below)
 * divided by the squared regularized smoothness indicators.
 */
void weno5js_interpolate(double *a, double *b, double *c,
                         double *d, double *e, double * restrict out)
{
    double q0, q1, q2;
    double is0, is1, is2;
    double alpha0, alpha1, alpha2;
    double sum_alpha;
    int i;
    // Constant is used for auto-vectorization in GCC
    const int ub = size;

#pragma omp simd \
    private(q0, q1, q2, is0, is1, is2, alpha0, alpha1, alpha2, sum_alpha)
    for (i = 0; i < ub; ++i) {
        /* candidate interpolants of the three substencils */
        q0 = c00*a[i] + c01*b[i] + c02*c[i];
        q1 = c10*b[i] + c11*c[i] + c12*d[i];
        q2 = c20*c[i] + c21*d[i] + c22*e[i];

        /* Jiang-Shu smoothness indicators; explicit squaring instead of
           pow(x, 2) keeps the loop free of libm calls and vectorizable. */
        double d2_0 = a[i] - 2*b[i] + c[i];
        double d1_0 = a[i] - 4*b[i] + 3*c[i];
        double d2_1 = b[i] - 2*c[i] + d[i];
        double d1_1 = d[i] - b[i];
        double d2_2 = c[i] - 2*d[i] + e[i];
        double d1_2 = 3*c[i] - 4*d[i] + e[i];
        is0 = isc * d2_0*d2_0 + 0.25 * d1_0*d1_0;
        is1 = isc * d2_1*d2_1 + 0.25 * d1_1*d1_1;
        is2 = isc * d2_2*d2_2 + 0.25 * d1_2*d1_2;

        /* unnormalized nonlinear weights (1 : 6 : 3 linear ratio) */
        alpha0 = 1.0 / ((eps + is0)*(eps + is0));
        alpha1 = 6.0 / ((eps + is1)*(eps + is1));
        alpha2 = 3.0 / ((eps + is2)*(eps + is2));
        sum_alpha = alpha0 + alpha1 + alpha2;

        out[i] = (alpha0 * q0 + alpha1 * q1 + alpha2 * q2) / sum_alpha;
    }
}
|
conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: quanwang@openailab.com
*/
#include "conv_kernel_x86.h"
#include "wino_conv_kernel_x86.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#if __AVX__
#include <immintrin.h>
#endif
#ifndef _MSC_VER
#include <sys/time.h>
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
/* Size in bytes of the private weight buffer for this filter.  uint8
   weights are dequantized to fp32 at run time, so they need 4x the room. */
static int get_private_mem_size(struct tensor* filter)
{
    int raw_bytes = filter->elem_num * filter->elem_size;

    if (filter->data_type == TENGINE_DT_UINT8) // simulator uint8 inference with fp32
        return raw_bytes * 4;

    return raw_bytes; // caution
}
/* No weight re-layout is needed on x86: the interleave buffer is just a
   verbatim copy of the filter data. */
static void interleave(struct tensor* filter, struct conv_priv_info* priv_info)
{
    void* dst = priv_info->interleave_buffer;
    const void* src = filter->data;

    memcpy(dst, src, filter->elem_num * filter->elem_size);
}
/* Dequantize uint8 weights into the fp32 interleave buffer for the
   simulator path: w_fp32 = (w_u8 - zero_point) * scale. */
static void interleave_uint8(struct tensor* filter, struct conv_priv_info* priv_info)
{
    float* dst = (float*)priv_info->interleave_buffer;
    const uint8_t* src = (const uint8_t*)filter->data;
    const float scale = filter->scale;
    const int zero_point = filter->zero_point;
    const int count = filter->elem_num;

    for (int i = 0; i < count; i++)
        dst[i] = ((float)src[i] - (float)zero_point) * scale;
}
/*
 * im2col for fp32: expand an inc x inh x inw (CHW, contiguous) image into a
 * (inc*ksize_h*ksize_w) x (outh*outw) matrix so convolution becomes GEMM.
 * Taps that fall into padding are written as zeros.
 *
 * Fixes over the previous version: the inner loop no longer forms a pointer
 * before the start of data_img (undefined behavior, CERT ARR30-C), and
 * w_low/w_high are clamped into [0, outw] so degenerate geometries cannot
 * overflow the zero-fill memsets.
 */
void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h,
                 int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose row index c into (input channel c_, kernel row kh, kernel col kw) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        /* horizontal input offset of this kernel tap */
        const int im_col = kw * dw - pw;

        /* [w_low, w_high) = output columns whose sample lands inside [0, inw) */
        int w_low = (im_col >= 0) ? 0 : (-im_col + sw - 1) / sw;
        int w_high = (im_col >= inw) ? 0 : ((inw - im_col) + sw - 1) / sw;
        if (w_low > outw)
            w_low = outw;
        if (w_high > outw)
            w_high = outw;
        if (w_high < w_low)
            w_high = w_low;

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;

            if (im_row < 0 || im_row >= inh)
            {
                /* entire output row falls into vertical padding */
                memset(out, 0, outw * sizeof(float));
                continue;
            }

            const float* in_row = data_img + inw * (im_row + inh * c_);

            /* left padding, valid samples, right padding */
            memset(out, 0, w_low * sizeof(float));
            for (int w = w_low; w < w_high; ++w)
                out[w] = in_row[im_col + w * sw];
            memset(out + w_high, 0, (outw - w_high) * sizeof(float));
        }
    }
}
/*
 * im2col for quantized uint8 input (simulator path): same layout as
 * im2col_fp32, but every valid sample is dequantized on the fly with
 * (q - zero_point) * scale; padding taps are written as 0.0f.
 *
 * Fixes over the previous version: no pointer is formed before the start of
 * data_img (undefined behavior, CERT ARR30-C), and w_low/w_high are clamped
 * into [0, outw] so degenerate geometries cannot overflow the memsets.
 */
void im2col_uint8(uint8_t* data_img, float* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param)
{
    /* convolution geometry, per group */
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];

    /* dequantization parameters of the input tensor */
    float scale = input_tensor->scale;
    int zero_point = input_tensor->zero_point;

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose row index c into (input channel c_, kernel row kh, kernel col kw) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;

        /* [w_low, w_high) = output columns whose sample lands inside [0, inw) */
        int w_low = (im_col >= 0) ? 0 : (-im_col + sw - 1) / sw;
        int w_high = (im_col >= inw) ? 0 : ((inw - im_col) + sw - 1) / sw;
        if (w_low > outw)
            w_low = outw;
        if (w_high > outw)
            w_high = outw;
        if (w_high < w_low)
            w_high = w_low;

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;

            if (im_row < 0 || im_row >= inh)
            {
                memset(out, 0, outw * sizeof(float));
                continue;
            }

            const uint8_t* in_row = data_img + inw * (im_row + inh * c_);

            memset(out, 0, w_low * sizeof(float));
            for (int w = w_low; w < w_high; ++w)
                out[w] = ((float)in_row[im_col + w * sw] - (float)zero_point) * scale;
            memset(out + w_high, 0, (outw - w_high) * sizeof(float));
        }
    }
}
/*
 * im2col for int8 input: same layout as im2col_fp32 but both source and
 * destination are int8; padding taps are written as 0.
 *
 * Fixes over the previous version: no pointer is formed before the start of
 * data_img (undefined behavior, CERT ARR30-C), and w_low/w_high are clamped
 * into [0, outw] so degenerate geometries cannot overflow the memsets.
 */
void im2col_int8(int8_t* data_img, int8_t* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param)
{
    /* convolution geometry, per group */
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose row index c into (input channel c_, kernel row kh, kernel col kw) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;

        /* [w_low, w_high) = output columns whose sample lands inside [0, inw) */
        int w_low = (im_col >= 0) ? 0 : (-im_col + sw - 1) / sw;
        int w_high = (im_col >= inw) ? 0 : ((inw - im_col) + sw - 1) / sw;
        if (w_low > outw)
            w_low = outw;
        if (w_high > outw)
            w_high = outw;
        if (w_high < w_low)
            w_high = w_low;

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            int8_t* out = data_col + (c * outh + h) * outw;

            if (im_row < 0 || im_row >= inh)
            {
                memset(out, 0, outw * sizeof(int8_t));
                continue;
            }

            const int8_t* in_row = data_img + inw * (im_row + inh * c_);

            memset(out, 0, w_low * sizeof(int8_t));
            for (int w = w_low; w < w_high; ++w)
                out[w] = in_row[im_col + w * sw];
            memset(out + w_high, 0, (outw - w_high) * sizeof(int8_t));
        }
    }
}
/* Dispatch im2col for batch item n / group, selecting the kernel by the
   input tensor's data type. */
static void im2col_ir(struct tensor* input, struct tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];

    /* start of this (batch, group) slice inside the input blob */
    void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size);
    void* im2col_buf = (void*)priv_info->im2col_buffer;

    switch (input->data_type)
    {
        case TENGINE_DT_FP32:
            im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3],
                        param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
            break;
        case TENGINE_DT_UINT8:
            im2col_uint8(input_base, im2col_buf, input, output, param);
            break;
        case TENGINE_DT_INT8:
            im2col_int8(input_base, im2col_buf, input, output, param);
            break;
        default:
            TLOG_ERR("Input data type %d not to be supported.\n", input->data_type);
            break;
    }
}
/*
 * Pack the K x N matrix pB (row-major) into pB_t as 8-column panels for the
 * GEMM kernel: full panels hold 8 consecutive columns interleaved row by
 * row; each leftover column gets its own 8*K-sized slot at offset
 * (col/8 + col%8) * 8 * K, matching the reader in sgemm_fp().
 */
void input_pack4_fp32(int K, int N, float* pB, float* pB_t, int num_thread)
{
    int block_count = N >> 3;
    int remain_start = block_count << 3;

    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int blk = 0; blk < block_count; blk++)
    {
        int col = blk * 8;
        const float* src = pB + col;
        float* dst = pB_t + (col / 8) * 8 * K;

        for (int row = 0; row < K; row++)
        {
#if __AVX__
            _mm256_storeu_ps(dst, _mm256_loadu_ps(src));
#else
            for (int t = 0; t < 8; t++)
                dst[t] = src[t];
#endif // __SSE__
            dst += 8;
            src += N;
        }
    }

    // [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int col = remain_start; col < N; col++)
    {
        const float* src = pB + col;
        float* dst = pB_t + (col / 8 + col % 8) * 8 * K;

        for (int row = 0; row < K; row++)
        {
            *dst++ = *src;
            src += N;
        }
    }
}
/*
 * Single-precision GEMM: pC(MxN) = pA_t * pB_t, where pA_t holds the packed
 * kernel matrix and pB_t the im2col output packed by input_pack4_fp32().
 * Output rows are processed in blocks of 8, then 4, then singly; output
 * columns in blocks of 8 with a scalar tail.  With __AVX__ the inner loops
 * use 256-bit FMA intrinsics; otherwise a scalar fallback computes the same
 * sums.  The panel offsets (i/8 + (i%8)/4 + i%4 etc.) mirror the packing
 * layout — assumed to match the corresponding kernel-packing routine (not
 * visible here); verify against it before changing any offset.
 */
static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    /* Phase 1: rows in blocks of 8, parallelized over row blocks. */
    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;

#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        float* output4 = pC + (i + 4) * N;
        float* output5 = pC + (i + 5) * N;
        float* output6 = pC + (i + 6) * N;
        float* output7 = pC + (i + 7) * N;

        int j = 0;
        /* 8 rows x 8 columns tile */
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            __m256 _sum4 = _mm256_set1_ps(0.0);
            __m256 _sum5 = _mm256_set1_ps(0.0);
            __m256 _sum6 = _mm256_set1_ps(0.0);
            __m256 _sum7 = _mm256_set1_ps(0.0);

            int k = 0;
            /* unrolled by 4 over the K dimension */
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70

                va += 8;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41
                _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51
                _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61
                _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71

                va += 8;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42
                _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52
                _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62
                _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72

                va += 8;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43
                _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53
                _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63
                _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73

                va += 8;
                vb += 32;
            }

            /* K remainder, one k per iteration */
            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _va4 = _mm256_broadcast_ss(va + 4);
                __m256 _va5 = _mm256_broadcast_ss(va + 5);
                __m256 _va6 = _mm256_broadcast_ss(va + 6);
                __m256 _va7 = _mm256_broadcast_ss(va + 7);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
                _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70

                va += 8;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
            _mm256_storeu_ps(output4, _sum4);
            _mm256_storeu_ps(output5, _sum5);
            _mm256_storeu_ps(output6, _sum6);
            _mm256_storeu_ps(output7, _sum7);
#else
            /* scalar fallback: same 8x8 accumulation */
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};
            float sum4[8] = {0};
            float sum5[8] = {0};
            float sum6[8] = {0};
            float sum7[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                    sum4[n] += va[4] * vb[n];
                    sum5[n] += va[5] * vb[n];
                    sum6[n] += va[6] * vb[n];
                    sum7[n] += va[7] * vb[n];
                }
                va += 8;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
                output4[n] = sum4[n];
                output5[n] = sum5[n];
                output6[n] = sum6[n];
                output7[n] = sum7[n];
            }
#endif // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
            output4 += 8;
            output5 += 8;
            output6 += 8;
            output7 += 8;
        }

        /* column tail: one output column at a time for these 8 rows */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m256 _sum0_7 = _mm256_set1_ps(0.0);
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _vb1 = _mm256_broadcast_ss(vb + 1);
                __m256 _vb2 = _mm256_broadcast_ss(vb + 2);
                __m256 _vb3 = _mm256_broadcast_ss(vb + 3);
                __m256 _va0 = _mm256_loadu_ps(va);
                __m256 _va1 = _mm256_loadu_ps(va + 8);
                __m256 _va2 = _mm256_loadu_ps(va + 16);
                __m256 _va3 = _mm256_loadu_ps(va + 24);

                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00
                _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10
                _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20
                _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30

                va += 32;
                vb += 4;
            }

            _sum0 = _mm256_add_ps(_sum0, _sum1);
            _sum2 = _mm256_add_ps(_sum2, _sum3);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum2);

            for (; k < K; k++)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _va = _mm256_loadu_ps(va);

                _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00

                va += 8;
                vb += 1;
            }

            /* the 8 lanes hold the 8 row results for this column */
            float output_sum0_7[8] = {0.f};
            _mm256_storeu_ps(output_sum0_7, _sum0_7);

            output0[0] = output_sum0_7[0];
            output1[0] = output_sum0_7[1];
            output2[0] = output_sum0_7[2];
            output3[0] = output_sum0_7[3];
            output4[0] = output_sum0_7[4];
            output5[0] = output_sum0_7[5];
            output6[0] = output_sum0_7[6];
            output7[0] = output_sum0_7[7];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            float sum4 = 0;
            float sum5 = 0;
            float sum6 = 0;
            float sum7 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                sum4 += va[4] * vb[0];
                sum5 += va[5] * vb[0];
                sum6 += va[6] * vb[0];
                sum7 += va[7] * vb[0];

                va += 8;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
            output4[0] = sum4;
            output5[0] = sum5;
            output6[0] = sum6;
            output7[0] = sum7;
#endif // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
            output4++;
            output5++;
            output6++;
            output7++;
        }
    }

    /* Phase 2: remaining rows in blocks of 4 (sequential). */
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = remain_outch_start + pp * 4;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;

        int j = 0;
        /* 4 rows x 8 columns tile */
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30

                va += 4;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31

                va += 4;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32

                va += 4;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30

                va += 4;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
#else
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
        }

        /* column tail for these 4 rows (SSE 4-lane accumulation) */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m128 _sum0_3 = _mm_set1_ps(0.0);
            __m128 _sum0 = _mm_set1_ps(0.0);
            __m128 _sum1 = _mm_set1_ps(0.0);
            __m128 _sum2 = _mm_set1_ps(0.0);
            __m128 _sum3 = _mm_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _vb1 = _mm_set1_ps(vb[1]);
                __m128 _vb2 = _mm_set1_ps(vb[2]);
                __m128 _vb3 = _mm_set1_ps(vb[3]);
                __m128 _va0 = _mm_loadu_ps(va);
                __m128 _va1 = _mm_loadu_ps(va + 4);
                __m128 _va2 = _mm_loadu_ps(va + 8);
                __m128 _va3 = _mm_loadu_ps(va + 12);

                _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00
                _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10
                _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20
                _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30

                va += 16;
                vb += 4;
            }

            _sum0 = _mm_add_ps(_sum0, _sum1);
            _sum2 = _mm_add_ps(_sum2, _sum3);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum2);

            for (; k < K; k++)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _va = _mm_loadu_ps(va);

                _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00

                va += 4;
                vb += 1;
            }

            /* the 4 lanes hold the 4 row results for this column */
            float output_sum0_3[4] = {0.f};
            _mm_storeu_ps(output_sum0_3, _sum0_3);

            output0[0] = output_sum0_3[0];
            output1[0] = output_sum0_3[1];
            output2[0] = output_sum0_3[2];
            output3[0] = output_sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];

                va += 4;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }

    /* Phase 3: remaining single rows. */
    remain_outch_start += nn_outch << 2;

    // output ch0
    for (int i = remain_outch_start; i < M; i++)
    {
        float* output = pC + i * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01
                _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02
                _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00

                va += 1;
                vb += 8;
            }

            _mm256_storeu_ps(output, _sum0);
#else
            float sum[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output[n] = sum[n];
            }
#endif // __AVX__
            output += 8;
        }

        /* scalar column tail for a single row */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;

            int k = 0;
#if __AVX__
            __m128 _sum0 = _mm_set1_ps(0.f);

            for (; k + 3 < K; k += 4)
            {
                __m128 _p0 = _mm_loadu_ps(vb);
                __m128 _k0 = _mm_loadu_ps(va);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));

                va += 4;
                vb += 4;
            }
            /* horizontal sum of the 4 lanes (MSVC has no operator[] on __m128) */
#ifdef _WIN32
            float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3];
#else
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#endif
#else
            float sum0 = 0.f;
#endif // __AVX__
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];

                va += 1;
                vb += 1;
            }

            output[0] = sum0;
            output++;
        }
    }
}
/* Re-pack the int8 im2col matrix pB (K rows x N columns, row-major) into
 * pB_t for the packed GEMM: full blocks of 8 columns are interleaved row by
 * row ([c0r0..c7r0, c0r1..c7r1, ...]); each leftover column is copied into
 * its own 8*K slot of which only the first K bytes are used. */
void input_pack4_int8(int K, int N, int8_t* pB, int8_t* pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remain_start = nn_size << 3;
    /* full 8-column blocks */
#pragma omp parallel for num_threads(num_thread)
    for (int b = 0; b < nn_size; b++)
    {
        int col = b * 8;
        const int8_t* src = pB + col;
        int8_t* dst = pB_t + (col / 8) * 8 * K;
        for (int row = 0; row < K; row++)
        {
            for (int t = 0; t < 8; t++)
                dst[t] = src[t];
            dst += 8;
            src += N;
        }
    }
    /* leftover columns: one 8*K slot each, K bytes actually written */
#pragma omp parallel for num_threads(num_thread)
    for (int col = remain_start; col < N; col++)
    {
        const int8_t* src = pB + col;
        int8_t* dst = pB_t + (col / 8 + col % 8) * 8 * K;
        for (int row = 0; row < K; row++)
        {
            *dst++ = *src;
            src += N;
        }
    }
}
/* int8 GEMM: pC (M x N, int32 accumulators) = pA_t * pB_t.
 * pA_t holds kernel rows pre-packed by conv_hcl_interleave_pack4_int8
 * (blocks of 8, then 4, then single rows); pB_t holds im2col columns
 * pre-packed by input_pack4_int8 (blocks of 8 columns plus one 8*K slot per
 * leftover column).  Output rows are produced 8 / 4 / 1 at a time to match
 * that layout.
 * NOTE(review): previous revisions carried disabled (#if 0) AVX2 variants of
 * every inner kernel; they were dead code and one loop even stepped
 * `k = K + 4` instead of `k = k + 4`, so only the scalar reference kernels
 * are kept. */
static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread)
{
    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;
    /* ---- blocks of 8 output rows ---- */
#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;
        int32_t* output0 = pC + (i)*N;
        int32_t* output1 = pC + (i + 1) * N;
        int32_t* output2 = pC + (i + 2) * N;
        int32_t* output3 = pC + (i + 3) * N;
        int32_t* output4 = pC + (i + 4) * N;
        int32_t* output5 = pC + (i + 5) * N;
        int32_t* output6 = pC + (i + 6) * N;
        int32_t* output7 = pC + (i + 7) * N;
        int j = 0;
        /* 8x8 tiles */
        for (; j + 7 < N; j += 8)
        {
            int8_t* va = pA_t + (i / 8) * 8 * K;
            int8_t* vb = pB_t + (j / 8) * 8 * K;
            int32_t sum0[8] = {0};
            int32_t sum1[8] = {0};
            int32_t sum2[8] = {0};
            int32_t sum3[8] = {0};
            int32_t sum4[8] = {0};
            int32_t sum5[8] = {0};
            int32_t sum6[8] = {0};
            int32_t sum7[8] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                    sum4[n] += va[4] * vb[n];
                    sum5[n] += va[5] * vb[n];
                    sum6[n] += va[6] * vb[n];
                    sum7[n] += va[7] * vb[n];
                }
                va += 8;
                vb += 8;
            }
            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
                output4[n] = sum4[n];
                output5[n] = sum5[n];
                output6[n] = sum6[n];
                output7[n] = sum7[n];
            }
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
            output4 += 8;
            output5 += 8;
            output6 += 8;
            output7 += 8;
        }
        /* leftover columns */
        for (; j < N; j++)
        {
            int8_t* va = pA_t + (i / 8) * 8 * K;
            int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
            int32_t sum0 = 0;
            int32_t sum1 = 0;
            int32_t sum2 = 0;
            int32_t sum3 = 0;
            int32_t sum4 = 0;
            int32_t sum5 = 0;
            int32_t sum6 = 0;
            int32_t sum7 = 0;
            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                sum4 += va[4] * vb[0];
                sum5 += va[5] * vb[0];
                sum6 += va[6] * vb[0];
                sum7 += va[7] * vb[0];
                va += 8;
                vb += 1;
            }
            *output0++ = sum0;
            *output1++ = sum1;
            *output2++ = sum2;
            *output3++ = sum3;
            *output4++ = sum4;
            *output5++ = sum5;
            *output6++ = sum6;
            *output7++ = sum7;
        }
    }
    /* ---- blocks of 4 output rows ---- */
    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = remain_outch_start + pp * 4;
        int32_t* output0 = pC + (i)*N;
        int32_t* output1 = pC + (i + 1) * N;
        int32_t* output2 = pC + (i + 2) * N;
        int32_t* output3 = pC + (i + 3) * N;
        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            int8_t* vb = pB_t + (j / 8) * 8 * K;
            int32_t sum0[8] = {0};
            int32_t sum1[8] = {0};
            int32_t sum2[8] = {0};
            int32_t sum3[8] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 8;
            }
            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
        }
        for (; j < N; j++)
        {
            int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
            int32_t sum0 = 0;
            int32_t sum1 = 0;
            int32_t sum2 = 0;
            int32_t sum3 = 0;
            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                va += 4;
                vb += 1;
            }
            *output0++ = sum0;
            *output1++ = sum1;
            *output2++ = sum2;
            *output3++ = sum3;
        }
    }
    remain_outch_start += nn_outch << 2;
    /* ---- remaining single output rows ---- */
    for (int i = remain_outch_start; i < M; i++)
    {
        int32_t* output = pC + i * N;
        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            int8_t* vb = pB_t + (j / 8) * 8 * K;
            int32_t sum[8] = {0};
            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                    sum[n] += va[0] * vb[n];
                va += 1;
                vb += 8;
            }
            for (int n = 0; n < 8; n++)
                output[n] = sum[n];
            output += 8;
        }
        for (; j < N; j++)
        {
            int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
            int32_t sum0 = 0; /* was `= 0.f`: int accumulator takes an int literal */
            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                va += 1;
                vb += 1;
            }
            *output++ = sum0;
        }
    }
}
/* fp32 convolution for one image n of one group: packed GEMM, then
 * per-channel bias add, then optional activation clamping
 * (activation == 0 -> ReLU, activation > 0 -> ReLU6). */
static void sgemm_fp32(struct tensor* input, struct tensor* filter, struct tensor* bias,
                       struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    int ksize = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int ch_per_group = param->output_channel / param->group;
    int hw = output->dims[2] * output->dims[3];
    int img_size = output->dims[1] * output->dims[2] * output->dims[3];

    float* kernel_pack = (float*)priv_info->interleave_buffer_pack4 + ch_per_group * group * ksize;
    float* input_pack = (float*)priv_info->im2col_buffer_pack4;
    float* out_ptr = (float*)output->data + n * img_size + ch_per_group * group * hw;

    sgemm_fp(ch_per_group, hw, ksize, kernel_pack, input_pack, out_ptr, num_thread);

    /* per-channel bias add */
    if (bias)
    {
        const float* bias_ptr = (const float*)bias->data + ch_per_group * group;
        for (int c = 0; c < ch_per_group; c++)
        {
            float b = bias_ptr[c];
            float* row = out_ptr + c * hw;
            for (int k = 0; k < hw; k++)
                row[k] += b;
        }
    }

    /* activation == 0: ReLU */
    if (param->activation == 0)
    {
        for (int k = 0; k < ch_per_group * hw; k++)
        {
            if (out_ptr[k] < 0)
                out_ptr[k] = 0;
        }
    }

    /* activation > 0: ReLU6 */
    if (param->activation > 0)
    {
        for (int k = 0; k < ch_per_group * hw; k++)
        {
            if (out_ptr[k] < 0)
                out_ptr[k] = 0;
            if (out_ptr[k] > 6)
                out_ptr[k] = 6;
        }
    }
}
/* uint8 convolution epilogue simulated in fp32: inputs were dequantized up
 * front, so run the fp32 GEMM into a scratch buffer, add the (rescaled
 * int32) bias, apply activation (0 -> ReLU, > 0 -> ReLU6), then requantize
 * to uint8 with the output tensor's scale and zero-point. */
static void sgemm_uint8(struct tensor* input, struct tensor* filter, struct tensor* bias,
                        struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                        int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_hw = out_h * out_w;
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];
    float* interleave_fp32 = (float*)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* im2col_pack4_fp32 = (float*)priv_info->im2col_buffer_pack4;
    uint8_t* output_uint8 = (uint8_t*)output->data + n * out_image_size + outchan_g * group * out_hw;
    int* bias_int32 = NULL;
    float bias_scale = 0.f;
    if (bias)
    {
        bias_int32 = (int*)bias->data + outchan_g * group;
        /* int32 bias values are stored in (input_scale * weight_scale) units */
        bias_scale = input->scale * filter->scale;
    }
    /* fp32 scratch for this group's GEMM result */
    float* output_sgemm = (float*)sys_malloc((unsigned long)outchan_g * out_hw * sizeof(float));
    if (!output_sgemm) /* previously unchecked: avoid NULL dereference on OOM */
        return;
    sgemm_fp(outchan_g, out_hw, kernel_size, interleave_fp32, im2col_pack4_fp32, output_sgemm, num_thread);
    /* add bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_hw; j++)
            {
                int output_off = i * out_hw + j;
                output_sgemm[output_off] += (float)bias_int32[i] * bias_scale;
            }
        }
    }
    /* activation == 0: ReLU */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_hw; j++)
            {
                int output_off = i * out_hw + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
            }
        }
    }
    /* activation > 0: ReLU6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_hw; j++)
            {
                int output_off = i * out_hw + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
                if (output_sgemm[output_off] > 6)
                    output_sgemm[output_off] = 6;
            }
        }
    }
    /* requantize fp32 -> uint8, saturating to [0, 255] */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;
            int udata = (int)(round(output_sgemm[output_off] / output->scale) + output->zero_point);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[output_off] = (uint8_t)udata;
        }
    }
    sys_free(output_sgemm);
}
/* int8 convolution epilogue: int8 GEMM into int32 accumulators, dequantize
 * with per-output-channel kernel scales, add bias, apply activation in fp32
 * (0 -> ReLU, > 0 -> ReLU6), then requantize symmetrically to int8. */
static void sgemm_int8(struct tensor* input, struct tensor* filter, struct tensor* bias,
                       struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_hw = out_h * out_w;
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];
    int8_t* interleave_int8 = (int8_t*)priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    int8_t* im2col_pack4_int8 = (int8_t*)priv_info->im2col_buffer_pack4;
    int8_t* output_int8 = (int8_t*)output->data + n * out_image_size + outchan_g * group * out_hw;
    int32_t* bias_int32 = NULL;
    if (bias)
        bias_int32 = (int32_t*)bias->data + outchan_g * group;
    float input_scale = input->scale;
    float* kernel_scales = filter->scale_list; /* one scale per output channel */
    float output_scale = output->scale;
    /* scratch buffers: raw int32 GEMM result and its fp32 dequantization */
    int32_t* output_sgemm_int32 = (int32_t*)sys_malloc((unsigned long)outchan_g * out_hw * sizeof(int32_t));
    float* output_sgemm_fp32 = (float*)sys_malloc((unsigned long)outchan_g * out_hw * sizeof(float));
    if (!output_sgemm_int32 || !output_sgemm_fp32)
    {
        /* allocation failed (previously unchecked): release whichever buffer
         * was obtained and bail out */
        if (output_sgemm_int32)
            sys_free(output_sgemm_int32);
        if (output_sgemm_fp32)
            sys_free(output_sgemm_fp32);
        return;
    }
    sgemm_i8(outchan_g, out_hw, kernel_size, interleave_int8, im2col_pack4_int8, output_sgemm_int32, num_thread);
    /* add bias (still in int32 domain) and dequantize to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;
            int32_t acc = output_sgemm_int32[output_off];
            if (bias)
                acc += bias_int32[i];
            output_sgemm_fp32[output_off] = (float)acc * input_scale * kernel_scales[i];
        }
    }
    /* activation == 0: ReLU */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_hw; j++)
            {
                int output_off = i * out_hw + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
            }
        }
    }
    /* activation > 0: ReLU6 */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_hw; j++)
            {
                int output_off = i * out_hw + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
                if (output_sgemm_fp32[output_off] > 6)
                    output_sgemm_fp32[output_off] = 6;
            }
        }
    }
    /* requantize fp32 -> int8, saturating to [-127, 127].
     * Pragma moved from the inner loop to the outer loop: the inner-loop
     * placement forked a thread team once per output channel and was
     * inconsistent with every other loop in this function. */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;
            int32_t data_i32 = (int32_t)(round(output_sgemm_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }
    sys_free(output_sgemm_int32);
    sys_free(output_sgemm_fp32);
}
/* check whether the conv should use the winograd implementation */
/* Return 1 when the winograd path applies: single-group 3x3 stride-1
 * dilation-1 convolution with >= 16 input channels and an output channel
 * count that is a nonzero multiple of 16, on inputs larger than 10x10. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    if (in_h <= 10 && in_w <= 10)
        return 0;
    if (param->group != 1)
        return 0;
    if (param->kernel_h != 3 || param->kernel_w != 3)
        return 0;
    if (param->stride_h != 1 || param->stride_w != 1)
        return 0;
    if (param->dilation_h != 1 || param->dilation_w != 1)
        return 0;
    if (param->input_channel < 16 || param->output_channel < 16)
        return 0;
    if (param->output_channel % 16 != 0)
        return 0;
    return 1;
}
/* Bytes needed for the im2col buffer: one kernel_size-element column per
 * output pixel of a single group. */
int conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param)
{
    int chan_per_group = param->input_channel / param->group;
    int kernel_size = chan_per_group * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (input->data_type == TENGINE_DT_UINT8) ? 4 : input->elem_size;
    return elem_size * output_xy * kernel_size;
}
/* Bytes needed for the packed im2col buffer: full 8-wide column blocks plus
 * one 8-wide slot for each leftover column (matches input_pack4_* layout). */
int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;
    return 8 * K * (N / 8 + N % 8) * elem_size;
}
/* Bytes needed for the interleaved kernel buffer: 8*K per block of 8 rows,
 * per block of 4 leftover rows, and per single leftover row (matches
 * conv_hcl_interleave_pack4_* layout). */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct tensor* filter)
{
    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;
    return 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
}
/* Interleave the fp32 kernel matrix (M rows x K cols, row-major in
 * interleave_buffer) into interleave_buffer_pack4: rows are grouped 8 at a
 * time (column-interleaved), then 4 at a time, then copied singly, matching
 * the row-block order consumed by sgemm_fp. */
void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = (float*)priv_info->interleave_buffer;
    float* pA_t = (float*)priv_info->interleave_buffer_pack4;
    int p = 0;
    /* blocks of 8 rows: [r0c0..r7c0, r0c1..r7c1, ...] */
    for (; p + 7 < M; p += 8)
    {
        float* dst = pA_t + (p / 8) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 8; r++)
                dst[r] = pA[(p + r) * K + q];
            dst += 8;
        }
    }
    /* blocks of 4 rows */
    for (; p + 3 < M; p += 4)
    {
        float* dst = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 4; r++)
                dst[r] = pA[(p + r) * K + q];
            dst += 4;
        }
    }
    /* leftover single rows: straight copy */
    for (; p < M; p++)
    {
        float* dst = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;
        const float* src = pA + p * K;
        for (int q = 0; q < K; q++)
            dst[q] = src[q];
    }
}
/* int8 twin of conv_hcl_interleave_pack4_fp32: interleave the kernel matrix
 * (M rows x K cols, row-major) into blocks of 8 rows, then 4 rows, then
 * single rows, matching the row-block order consumed by sgemm_i8. */
void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info)
{
    int8_t* pA = (int8_t*)priv_info->interleave_buffer;
    int8_t* pA_t = (int8_t*)priv_info->interleave_buffer_pack4;
    int p = 0;
    /* blocks of 8 rows: [r0c0..r7c0, r0c1..r7c1, ...] */
    for (; p + 7 < M; p += 8)
    {
        int8_t* dst = pA_t + (p / 8) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 8; r++)
                dst[r] = pA[(p + r) * K + q];
            dst += 8;
        }
    }
    /* blocks of 4 rows */
    for (; p + 3 < M; p += 4)
    {
        int8_t* dst = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            for (int r = 0; r < 4; r++)
                dst[r] = pA[(p + r) * K + q];
            dst += 4;
        }
    }
    /* leftover single rows: straight copy */
    for (; p < M; p++)
    {
        int8_t* dst = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;
        const int8_t* src = pA + p * K;
        for (int q = 0; q < K; q++)
            dst[q] = src[q];
    }
}
/* Prepare scratch and kernel buffers for the im2col + GEMM convolution.
 * Returns 0 on success, -1 on allocation failure.
 * Side effects on priv_info: fills the im2col/interleave buffer pointers and
 * sizes; for fp32 3x3s1 convolutions the winograd fast path may take over. */
int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implement, only for conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }

    /* im2col scratch buffer (skipped when the caller registered one) */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL) /* BUG FIX: allocation result was previously unchecked */
            return -1;
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* packed im2col buffer */
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }

    /* flat interleaved-kernel buffer */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* interleave the kernel weights into the flat buffer */
    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);

    /* NOTE(review): despite its name, this flag appears to mean "use a
     * separate pack4 kernel buffer" — the buffer is allocated HERE, not
     * supplied externally; confirm against the callers that set it. */
    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        if (mem == NULL)
            return -1;
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;

        if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8)
            conv_hcl_interleave_pack4_fp32(M, K, priv_info);
        else
            conv_hcl_interleave_pack4_int8(M, K, priv_info);

        /* the flat interleave buffer is no longer needed once repacked */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    else
    {
        /* pack4 view aliases the flat buffer; postrun must not free both */
        priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer;
        priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size;
    }

    return 0;
}
/* Release every buffer allocated by conv_hcl_prerun(). Caller-registered
 * (external_*) buffers are left alone. Always returns 0. */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }

    /* separately-allocated pack4 kernel buffer (see prerun) */
    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL &&
        priv_info->interleave_buffer_pack4 != priv_info->interleave_buffer)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    /* BUG FIX: the flat interleave buffer was never freed when the pack4
     * path is disabled — prerun aliases interleave_buffer_pack4 to it and
     * the old code only ever freed the pack4 pointer, leaking the buffer. */
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        if (priv_info->interleave_buffer_pack4 == priv_info->interleave_buffer)
            priv_info->interleave_buffer_pack4 = NULL; /* aliased: avoid dangling pointer */
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        /* conv_hcl_run() may alias im2col_buffer_pack4 to im2col_buffer;
         * clear the alias so the next block cannot double-free it */
        if (priv_info->im2col_buffer_pack4 == priv_info->im2col_buffer)
            priv_info->im2col_buffer_pack4 = NULL;
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }

    return 0;
}
/* Execute the convolution: winograd fast path when enabled, otherwise
 * im2col + GEMM per batch image and per group, dispatching on the input
 * data type. Returns 0 on success, -1 for an unsupported data type. */
int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                 struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int type = input_tensor->data_type;

    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread,
                                 cpu_affinity);
    }

    int batch = input_tensor->dims[0];
    for (int n = 0; n < batch; n++)
    {
        for (int g = 0; g < group; g++)
        {
            /* expand the current image/group into column form */
            im2col_ir(input_tensor, output_tensor, priv_info, param, n, g);

            void* col_buf = priv_info->im2col_buffer;
            if (priv_info->external_interleave_pack4_mem)
            {
                int K = filter_tensor->elem_num / filter_tensor->dims[0];
                int N = output_tensor->dims[2] * output_tensor->dims[3];
                if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8)
                    input_pack4_fp32(K, N, col_buf, priv_info->im2col_buffer_pack4, num_thread);
                else
                    input_pack4_int8(K, N, col_buf, priv_info->im2col_buffer_pack4, num_thread);
            }
            else
            {
                /* no packing: the GEMM reads the im2col buffer directly */
                priv_info->im2col_buffer_pack4 = col_buf;
            }

            switch (type)
            {
                case TENGINE_DT_FP32:
                    sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, n, g, num_thread);
                    break;
                case TENGINE_DT_UINT8:
                    sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, n, g, num_thread);
                    break;
                case TENGINE_DT_INT8:
                    sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, n, g, num_thread);
                    break;
                default:
                    TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);
                    return -1;
            }
        }
    }

    return 0;
}
/* Register a caller-owned im2col buffer; prerun will then skip its own
 * allocation and postrun will not free it. Always returns 0. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
/* Register a caller-owned packed-im2col buffer; prerun will then skip its
 * own allocation and postrun will not free it. Always returns 0. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    priv_info->external_im2col_pack4_mem = 1;
    return 0;
}
|
ast-dump-openmp-target-parallel.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() { // minimal function exercising a combined OpenMP 'target parallel' construct
#pragma omp target parallel // the captured regions this produces are verified by the CHECK lines below
; // empty structured block (NullStmt); comments here are trailing-only so the FileCheck line/col references stay valid
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPTargetParallelDirective {{.*}} <line:4:1, col:28>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <line:5:3> openmp_structured_block
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3> openmp_structured_block
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
|
GB_binop__rdiv_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fp32)
// A*D function (colscale): GB (_AxD__rdiv_fp32)
// D*A function (rowscale): GB (_DxB__rdiv_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fp32)
// C=scalar+B GB (_bind1st__rdiv_fp32)
// C=scalar+B' GB (_bind1st_tran__rdiv_fp32)
// C=A+scalar GB (_bind2nd__rdiv_fp32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (bij / aij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (BUG FIX: a stray trailing '\' after the 0 spliced the following comment
// line into this macro definition; removed so the definition ends here)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (BUG FIX: same stray trailing '\' removed here)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: rdiv is division with the operands reversed
#define GB_BINOP(z,x,y,i,j) \
z = (y / x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FP32 || GxB_NO_RDIV_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the arithmetic comes from the
// GB_BINOP macro above via the shared template. No GB_DISABLE guard here —
// presumably the generic ewise3 dispatcher checks it; confirm in the caller.
// (Auto-generated file: change the Generator, not this code.)
void GB (_Cdense_ewise3_accum__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation); the
// arithmetic comes from the GB_BINOP macro above via the shared template.
// (Auto-generated file: change the Generator, not this code.)
void GB (_Cdense_ewise3_noaccum__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, over the slices
// described by B_ek_slicing. Returns GrB_NO_VALUE when this operator/type
// combination is compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumB__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. The scalar is passed as
// an untyped GB_void pointer and reinterpreted as float (bwork) for the
// template. Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__rdiv_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing float
// results into C->x; the work is split per A_ek_slicing/A_ntasks.
GrB_Info GB (_AxD__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing float
// results into C->x.
GrB_Info GB (_DxB__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B. The alpha/beta scalars are read
// only when is_eWiseUnion is true (GrB_eWiseUnion fill values); otherwise
// they are left uninitialized and the template must not touch them.
// GB_WERK_DECLARE / GB_FREE_WORKSPACE pair up around the template include.
GrB_Info GB (_AaddB__rdiv_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper;
// all looping and masking logic lives in the shared meta template.
GrB_Info GB (_AemultB_08__rdiv_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for rdiv (flips are pre-resolved by
// rewriting div(y,x) as rdiv(x,y)), so only the non-flipped branch of the
// #if below is compiled for this operator.
GrB_Info GB (_AemultB_02__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; work is partitioned by M_ek_slicing.
GrB_Info GB (_AemultB_04__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__rdiv_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = rdiv(x, Bx): bind the scalar as the FIRST operand, so each output is
// Cx [p] = bij / x. Entries whose bitmap bit is clear are skipped (GBB;
// confirm macro semantics in GB.h). Cx and Bx may alias: the loop writes
// each p exactly once from the same p, so in-place operation is safe.
GrB_Info GB (_bind1st__rdiv_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (bij / x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = rdiv(Ax, y): bind the scalar as the SECOND operand, so each output is
// Cx [p] = y / aij. Entries whose bitmap bit is clear are skipped (GBB).
// Cx and Ax may alias: each p is read then written once, so this is safe.
GrB_Info GB (_bind2nd__rdiv_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (y / aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; for rdiv with x bound
// first the result is aij / x.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij / x) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__rdiv_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function (generated pattern;
// here it redefines it to the same type)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below; for rdiv with y bound
// second the result is y / aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y / aij) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
conjugate_gradient.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include "nb/memory_bot.h"
#include "nb/solver_bot/sparse/sparse.h"
#include "nb/solver_bot/sparse/solvers/conjugate_gradient.h"
#include "../sparse_struct.h"
int nb_sparse_solve_conjugate_gradient
                (const nb_sparse_t *const A,
                 const double *const b,
                 double *_x,                /* Out */
                 uint32_t max_iter, double tolerance,
                 uint32_t* niter_performed,     /* Out (NULL if not required) */
                 double* tolerance_reached, /* Out (NULL if not required) */
                 uint32_t omp_parallel_threads)
/* Solve Ax = b with the Conjugate Gradient method.
 * Returns 0 when ||residual|| <= tolerance within max_iter iterations,
 * 1 otherwise. _x holds the initial guess on entry and the solution on
 * exit. */
{
    double *g = nb_allocate_zero_mem(A->N * sizeof(double)); /* gradient (residual) */
    double *p = nb_allocate_zero_mem(A->N * sizeof(double)); /* search direction */
    double *w = nb_allocate_zero_mem(A->N * sizeof(double)); /* w = A p */
    double dot_gg = 0;

    /* g0 = A x0 - b, p0 = -g0, dot_gg = ||g0||^2 */
#pragma omp parallel for reduction(+:dot_gg) num_threads(omp_parallel_threads) schedule(guided)
    for (uint32_t i = 0; i < A->N; i++) {
        double sum = 0;
        for (uint32_t j = 0; j < A->rows_size[i]; j++)
            sum += A->rows_values[i][j] * _x[A->rows_index[i][j]];
        g[i] = sum - b[i];
        p[i] = -g[i];
        dot_gg += g[i] * g[i];
    }

    uint32_t k = 0;
    while (dot_gg > tolerance * tolerance && k < max_iter) {
        /* w = A p and dot_pw = p'w */
        double dot_pw = 0;
#pragma omp parallel for reduction(+:dot_pw) num_threads(omp_parallel_threads) schedule(guided)
        for (uint32_t i = 0; i < A->N; i++) {
            w[i] = 0;
            for (uint32_t j = 0; j < A->rows_size[i]; j++)
                w[i] += A->rows_values[i][j] * p[A->rows_index[i][j]];
            dot_pw += p[i] * w[i];
        }
        double alphak = dot_gg / dot_pw;

        /* x += alpha p, g += alpha w, dot_gkgk = ||g_{k+1}||^2 */
        double dot_gkgk = 0;
#pragma omp parallel for reduction(+:dot_gkgk) num_threads(omp_parallel_threads) schedule(guided)
        for (uint32_t i = 0; i < A->N; i++) {
            _x[i] += alphak * p[i];
            g[i] += alphak * w[i];
            dot_gkgk += g[i] * g[i];
        }
        double betak = dot_gkgk / dot_gg;

#pragma omp parallel for num_threads(omp_parallel_threads)
        for (uint32_t i = 0; i < A->N; i++)
            p[i] = -g[i] + betak * p[i];

        /* BUG FIX: carry the fresh residual norm forward. Previously dot_gg
         * was recomputed from the PRE-update gradient at the top of the next
         * iteration, so the loop condition, the convergence test below, and
         * *tolerance_reached all lagged one iteration behind (and an extra
         * parallel reduction was paid for it). */
        dot_gg = dot_gkgk;
        k++;
    }

    /* Free memory */
    nb_free_mem(g);
    nb_free_mem(p);
    nb_free_mem(w);

    if (niter_performed != NULL) niter_performed[0] = k;
    if (tolerance_reached != NULL) *tolerance_reached = sqrt(dot_gg);
    if (dot_gg > tolerance * tolerance)
        return 1;
    return 0;
}
|
GB_msort_3.c | //------------------------------------------------------------------------------
// GB_msort_3: sort a 3-by-n list of integers, using A[0:2][ ] as the key
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// A parallel mergesort of an array of 3-by-n integers. Each key
// consists of three integers.
#include "GB_msort_3.h"
//------------------------------------------------------------------------------
// GB_msort_3_binary_search: binary search for the pivot
//------------------------------------------------------------------------------
// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on
// input. The return value is pleft, where
//
// X [p_start ... pleft-1] <= Pivot and
// X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end. If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.
// Each key is the 3-integer tuple (X_0[i], X_1[i], X_2[i]); GB_lt_3/GB_eq_3
// compare whole tuples (presumably lexicographically — see GB_msort_3.h).
static int64_t GB_msort_3_binary_search // return pleft
(
const int64_t *restrict Y_0, // Pivot is Y [pivot]
const int64_t *restrict Y_1,
const int64_t *restrict Y_2,
const int64_t pivot,
const int64_t *restrict X_0, // search in X [p_start..p_end_-1]
const int64_t *restrict X_1,
const int64_t *restrict X_2,
const int64_t p_start,
const int64_t p_end
)
{
//--------------------------------------------------------------------------
// find where the Pivot appears in X
//--------------------------------------------------------------------------
// binary search of X [p_start...p_end-1] for the Pivot
int64_t pleft = p_start ;
int64_t pright = p_end - 1 ;
while (pleft < pright)
{
// no overflow risk here: pleft and pright are array positions, far
// below INT64_MAX, so their sum cannot wrap
int64_t pmiddle = (pleft + pright) >> 1 ;
// less = (X [pmiddle] < Pivot)
bool less = GB_lt_3 (X_0, X_1, X_2, pmiddle,
Y_0, Y_1, Y_2, pivot) ;
pleft = less ? (pmiddle+1) : pleft ;
pright = less ? pright : pmiddle ;
}
// binary search is narrowed down to a single item
// or it has found the list is empty:
ASSERT (pleft == pright || pleft == pright + 1) ;
// If found is true then X [pleft == pright] == Pivot. If duplicates
// appear then X [pleft] is any one of the entries equal to the Pivot
// in the list. If found is false then
// X [p_start ... pleft-1] < Pivot and
// X [pleft+1 ... p_end-1] > Pivot holds.
// The value X [pleft] may be either < or > Pivot.
bool found = (pleft == pright) && GB_eq_3 (X_0, X_1, X_2, pleft,
Y_0, Y_1, Y_2, pivot) ;
// Modify pleft and pright:
if (!found && (pleft == pright))
{
if (GB_lt_3 (X_0, X_1, X_2, pleft,
Y_0, Y_1, Y_2, pivot))
{
pleft++ ;
}
else
{
// pright++ ; // (not needed)
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
// If found is false then
// X [p_start ... pleft-1] < Pivot and
// X [pleft ... p_end-1] > Pivot holds,
// and pleft-1 == pright
// If X has no duplicates, then whether or not Pivot is found,
// X [p_start ... pleft-1] < Pivot and
// X [pleft ... p_end-1] >= Pivot holds.
// If X has duplicates, then whether or not Pivot is found,
// X [p_start ... pleft-1] <= Pivot and
// X [pleft ... p_end-1] >= Pivot holds.
return (pleft) ;
}
//------------------------------------------------------------------------------
// GB_msort_3_create_merge_tasks
//------------------------------------------------------------------------------
// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ]. The task tids created are t0 to
// t0+ntasks-1.
void GB_msort_3_create_merge_tasks
(
// output:
int64_t *restrict L_task, // L_task [t0...t0+ntasks-1] computed
int64_t *restrict L_len, // L_len [t0...t0+ntasks-1] computed
int64_t *restrict R_task, // R_task [t0...t0+ntasks-1] computed
int64_t *restrict R_len, // R_len [t0...t0+ntasks-1] computed
int64_t *restrict S_task, // S_task [t0...t0+ntasks-1] computed
// input:
const int t0, // first task tid to create
const int ntasks, // # of tasks to create
const int64_t pS_start, // merge into S [pS_start...]
const int64_t *restrict L_0, // Left = L [pL_start...pL_end-1]
const int64_t *restrict L_1,
const int64_t *restrict L_2,
const int64_t pL_start,
const int64_t pL_end,
const int64_t *restrict R_0, // Right = R [pR_start...pR_end-1]
const int64_t *restrict R_1,
const int64_t *restrict R_2,
const int64_t pR_start,
const int64_t pR_end
)
{
//--------------------------------------------------------------------------
// get problem size
//--------------------------------------------------------------------------
int64_t nleft = pL_end - pL_start ; // size of Left array
int64_t nright = pR_end - pR_start ; // size of Right array
int64_t total_work = nleft + nright ; // total work to do
ASSERT (ntasks >= 1) ;
ASSERT (total_work > 0) ;
//--------------------------------------------------------------------------
// create the tasks
//--------------------------------------------------------------------------
if (ntasks == 1)
{
//----------------------------------------------------------------------
// a single task will merge all of Left and Right into Sresult
//----------------------------------------------------------------------
L_task [t0] = pL_start ; L_len [t0] = nleft ;
R_task [t0] = pR_start ; R_len [t0] = nright ;
S_task [t0] = pS_start ;
}
else
{
//----------------------------------------------------------------------
// partition the Left and Right arrays for multiple merge tasks
//----------------------------------------------------------------------
// split the LARGER list at its midpoint and binary-search the smaller
// one for that pivot, so both halves of the merge stay balanced
int64_t pleft, pright ;
if (nleft >= nright)
{
// split Left in half, and search for its pivot in Right
pleft = (pL_end + pL_start) >> 1 ;
pright = GB_msort_3_binary_search (
L_0, L_1, L_2, pleft,
R_0, R_1, R_2, pR_start, pR_end) ;
}
else
{
// split Right in half, and search for its pivot in Left
pright = (pR_end + pR_start) >> 1 ;
pleft = GB_msort_3_binary_search (
R_0, R_1, R_2, pright,
L_0, L_1, L_2, pL_start, pL_end) ;
}
//----------------------------------------------------------------------
// partition the tasks according to the work of each partition
//----------------------------------------------------------------------
// work0 is the total work in the first partition
int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
int ntasks0 = (int) round ((double) ntasks *
(((double) work0) / ((double) total_work))) ;
// ensure at least one task is assigned to each partition
// (this also guarantees termination: both recursive calls below get
// strictly fewer tasks than ntasks, since 1 <= ntasks0 <= ntasks-1)
ntasks0 = GB_IMAX (ntasks0, 1) ;
ntasks0 = GB_IMIN (ntasks0, ntasks-1) ;
int ntasks1 = ntasks - ntasks0 ;
//----------------------------------------------------------------------
// assign ntasks0 to the first half
//----------------------------------------------------------------------
// ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
// into the result S [pS_start...work0-1].
GB_msort_3_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start,
L_0, L_1, L_2, pL_start, pleft,
R_0, R_1, R_2, pR_start, pright) ;
//----------------------------------------------------------------------
// assign ntasks1 to the second half
//----------------------------------------------------------------------
// ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
// into the result S [pS_start+work0...pS_start+total_work].
int t1 = t0 + ntasks0 ; // first task id of the second set of tasks
int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S
GB_msort_3_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1,
L_0, L_1, L_2, pleft, pL_end,
R_0, R_1, R_2, pright, pR_end) ;
}
}
//------------------------------------------------------------------------------
// GB_msort_3_merge: merge two sorted lists via a single thread
//------------------------------------------------------------------------------
// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1]
static void GB_msort_3_merge
(
    int64_t *restrict S_0,              // output of length nleft + nright
    int64_t *restrict S_1,
    int64_t *restrict S_2,
    const int64_t *restrict Left_0,     // left input of length nleft
    const int64_t *restrict Left_1,
    const int64_t *restrict Left_2,
    const int64_t nleft,
    const int64_t *restrict Right_0,    // right input of length nright
    const int64_t *restrict Right_1,
    const int64_t *restrict Right_2,
    const int64_t nright
)
{
    int64_t s = 0, lpos = 0, rpos = 0 ;

    // interleave entries from Left and Right while neither list is exhausted,
    // taking the smaller head (3-key comparison via GB_lt_3) each step
    while (lpos < nleft && rpos < nright)
    {
        if (GB_lt_3 (Left_0, Left_1, Left_2, lpos,
                     Right_0, Right_1, Right_2, rpos))
        {
            // S [s] = Left [lpos++]
            S_0 [s] = Left_0 [lpos] ;
            S_1 [s] = Left_1 [lpos] ;
            S_2 [s] = Left_2 [lpos] ;
            lpos++ ;
        }
        else
        {
            // S [s] = Right [rpos++]
            S_0 [s] = Right_0 [rpos] ;
            S_1 [s] = Right_1 [rpos] ;
            S_2 [s] = Right_2 [rpos] ;
            rpos++ ;
        }
        s++ ;
    }

    // one list is exhausted: bulk-copy whatever remains of the other into S
    if (lpos < nleft)
    {
        int64_t tail = nleft - lpos ;
        memcpy (S_0 + s, Left_0 + lpos, tail * sizeof (int64_t)) ;
        memcpy (S_1 + s, Left_1 + lpos, tail * sizeof (int64_t)) ;
        memcpy (S_2 + s, Left_2 + lpos, tail * sizeof (int64_t)) ;
    }
    else if (rpos < nright)
    {
        int64_t tail = nright - rpos ;
        memcpy (S_0 + s, Right_0 + rpos, tail * sizeof (int64_t)) ;
        memcpy (S_1 + s, Right_1 + rpos, tail * sizeof (int64_t)) ;
        memcpy (S_2 + s, Right_2 + rpos, tail * sizeof (int64_t)) ;
    }
}
//------------------------------------------------------------------------------
// GB_msort_3: parallel mergesort
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GB_msort_3 // sort array A of size 3-by-n, using 3 keys (A [0:2][])
(
    int64_t *restrict A_0,      // size n array (most significant key)
    int64_t *restrict A_1,      // size n array (second key)
    int64_t *restrict A_2,      // size n array (third key)
    const int64_t n,
    int nthreads                // # of threads to use
)
{

    //--------------------------------------------------------------------------
    // handle small problems with a single thread
    //--------------------------------------------------------------------------

    if (nthreads <= 1 || n <= GB_BASECASE)
    {
        // sequential quicksort
        GB_qsort_3 (A_0, A_1, A_2, n) ;
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // determine # of tasks
    //--------------------------------------------------------------------------

    // determine the number of levels to create, which must always be an
    // even number.  The # of levels is chosen to ensure that the # of leaves
    // of the task tree is between 4*nthreads and 16*nthreads.

    //  2 to 4 threads:     4 levels, 16 qsort leaves
    //  5 to 16 threads:    6 levels, 64 qsort leaves
    // 17 to 64 threads:    8 levels, 256 qsort leaves
    // 65 to 256 threads:   10 levels, 1024 qsort leaves
    // 256 to 1024 threads: 12 levels, 4096 qsort leaves
    // ...

    int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
    int ntasks = 1 << k ;       // # of leaves; a power of 4 since k is even

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // a single workspace block W holds the 3 scratch key arrays (3*n), the
    // 5 per-task descriptor arrays (5*ntasks), and the leaf partition
    // boundaries Slice (ntasks+1)
    int64_t *restrict W = NULL ; size_t W_size = 0 ;
    W = GB_MALLOC_WORK (3*n + 6*ntasks + 1, int64_t, &W_size) ;
    if (W == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    // carve W into its component arrays
    int64_t *T = W ;
    int64_t *restrict W_0    = T ; T += n ;
    int64_t *restrict W_1    = T ; T += n ;
    int64_t *restrict W_2    = T ; T += n ;
    int64_t *restrict L_task = T ; T += ntasks ;
    int64_t *restrict L_len  = T ; T += ntasks ;
    int64_t *restrict R_task = T ; T += ntasks ;
    int64_t *restrict R_len  = T ; T += ntasks ;
    int64_t *restrict S_task = T ; T += ntasks ;
    int64_t *restrict Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    // Slice [t] is the start of leaf t in A; each leaf is sorted in place
    GB_eslice (Slice, n, ntasks) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        GB_qsort_3 (A_0 + leaf, A_1 + leaf, A_2 + leaf, leafsize) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    // since k is even, each iteration does two merge passes (A into W, then
    // W back into A), so the final sorted result always lands in A
    int nt = 1 ;
    for ( ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // TODO: skip k and k-1 for each group of 4 sublists of A if they are
        // already sorted with respect to each other.

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            GB_msort_3_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, A_1, A_2, Slice [tid],    Slice [tid+nt],
                A_0, A_1, A_2, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            GB_msort_3_merge (
                W_0 + pS, W_1 + pS, W_2 + pS,
                A_0 + pL, A_1 + pL, A_2 + pL, nL,
                A_0 + pR, A_1 + pR, A_2 + pR, nR) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            GB_msort_3_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, W_1, W_2, Slice [tid],    Slice [tid+nt],
                W_0, W_1, W_2, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            GB_msort_3_merge (
                A_0 + pS, A_1 + pS, A_2 + pS,
                W_0 + pL, W_1 + pL, W_2 + pL, nL,
                W_0 + pR, W_1 + pR, W_2 + pR, nR) ;
        }
        nt = 2*nt ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK (&W, W_size) ;
    return (GrB_SUCCESS) ;
}
|
idaFoodWeb_bnd_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* Based on idaFoodWeb_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDA: Food web problem.
*
* This example program (OpenMP version) uses the SUNBAND linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDA using the SUNBAND linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idaFoodWeb_bnd_omp
* To specify the number of threads at the command line, use
* % ./idaFoodWeb_bnd_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ida/ida.h>
#include <sunmatrix/sunmatrix_band.h>
#include <sunlinsol/sunlinsol_band.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_direct.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
/* Type: UserData. Contains problem constants, etc. */
typedef struct {
sunindextype Neq, ns, np, mx, my;
realtype dx, dy, **acoef;
realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
N_Vector rates;
int nthreads;
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
/* main: set up the food-web DAE, correct the initial condition with
   IDACalcIC, integrate to the NOUT output times, and report statistics. */
int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNMatrix A;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, retval;
  sunindextype mu, ml;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;
  SUNContext ctx;

  ida_mem = NULL;
  A = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;

  /* Set the number of threads to use */
  num_threads = 1;                      /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)                         /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* Create the SUNDIALS context object for this simulation */
  retval = SUNContext_Create(NULL, &ctx);
  if (check_retval(&retval, "SUNContext_Create", 1)) return 1;

  /* Allocate and initialize user data block webdata. */
  /* NOTE(review): the malloc and the two allocations below are used without
     NULL checks -- confirm whether failure handling is wanted here. */
  webdata = (UserData) malloc(sizeof *webdata);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads, ctx);
  webdata->acoef = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
  webdata->nthreads = num_threads;

  InitUserData(webdata);

  /* Allocate N-vectors and initialize cc, cp, and id. */
  cc = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);

  cp = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);

  /* id marks differential (1) vs algebraic (0) components for IDASetId */
  id = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);

  SetInitialProfiles(cc, cp, id, webdata);

  /* Set remaining inputs to IDAMalloc. */
  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;

  /* Call IDACreate and IDAMalloc to initialize IDA. */
  ida_mem = IDACreate(ctx);
  if(check_retval((void *) ida_mem, "IDACreate", 0)) return(1);

  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);

  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);

  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);

  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);

  /* Setup band matrix and linear solver, and attach to IDA.
     Bandwidths mu = ml = NSMX couple each point to its y-neighbors. */
  mu = ml = NSMX;
  A = SUNBandMatrix(NEQ, mu, ml, ctx);
  if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);
  LS = SUNLinSol_Band(cc, A, ctx);
  if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);
  retval = IDASetLinearSolver(ida_mem, LS, A);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);

  /* Call IDACalcIC (with default options) to correct the initial values. */
  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);

  /* Print heading, basic parameters, and initial values. */
  PrintHeader(mu, ml, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);

  /* Loop over iout, call IDASolve (normal mode), print selected output. */
  for (iout = 1; iout <= NOUT; iout++) {

    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);

    PrintOutput(ida_mem, cc, tret);

    /* outputs at t = .001, .01, .1, then .4, .7, 1 (see file header) */
    if (iout < 3) tout *= TMULT; else tout += TADD;
  }

  /* Print final statistics and free memory. */
  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);

  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);
  SUNMatDestroy(A);
  N_VDestroy_OpenMP(cc);
  N_VDestroy_OpenMP(cp);
  N_VDestroy_OpenMP(id);
  SUNDlsMat_destroyMat(webdata->acoef);
  N_VDestroy_OpenMP(webdata->rates);
  free(webdata);
  SUNContext_Free(&ctx);

  return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the
* equations, then loads the residual vector accordingly,
* using cp in the case of prey species.
*/
/*
 * resweb: System residual function for predator-prey system.
 * This routine calls Fweb to get all the right-hand sides of the
 * equations, then loads the residual vector accordingly,
 * using cp in the case of prey species.
 */
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res, void *user_data)
{
  UserData webdata = (UserData)user_data;
  realtype *cpv  = NV_DATA_OMP(cp);
  realtype *resv = NV_DATA_OMP(res);
  sunindextype np = webdata->np;
  sunindextype jy;

  /* Fweb stores the right-hand sides of the system into res. */
  Fweb(tt, cc, res, webdata);

  /* Convert right-hand sides to residuals over the whole grid:
     differential (prey) components get cp - F, algebraic (predator)
     components get -F. */
#pragma omp parallel for default(shared) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    sunindextype yloc = NSMX * jy;
    for (sunindextype jx = 0; jx < MX; jx++) {
      sunindextype loc = yloc + NUM_SPECIES * jx;
      for (sunindextype is = 0; is < NUM_SPECIES; is++) {
        resv[loc+is] = (is < np) ? cpv[loc+is] - resv[loc+is]
                                 : -resv[loc+is];
      }
    }
  }

  return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData: Load problem constants in webdata (of type UserData).
*/
/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 * Fills the interaction matrix acoef, the b coefficients, and the
 * diffusion coefficients cox/coy. Assumes webdata->acoef was already
 * allocated (NUM_SPECIES x NUM_SPECIES) by the caller.
 */
static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype *a1,*a2, *a3, *a4, dx2, dy2;

  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);   /* uniform grid spacing in x */
  webdata->dy = AY/(MY-1);   /* uniform grid spacing in y */
  webdata->Neq= NEQ;

  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy);

  for (i = 0; i < np; i++) {
    /* a1..a4 walk one row of each quadrant of acoef:
       a1 = prey/pred block, a2 = pred/prey block,
       a3 = prey/prey block, a4 = pred/pred block. */
    a1 = &(acoef[i][np]);
    a2 = &(acoef[i+np][0]);
    a3 = &(acoef[i][0]);
    a4 = &(acoef[i+np][np]);

    /* Fill in the portion of acoef in the four quadrants, row by row. */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;
      *a2++ = EE;
      *a3++ = ZERO;
      *a4++ = ZERO;
    }

    /* Reset the diagonal elements of acoef to -AA (self-interaction). */
    acoef[i][i] = -AA; acoef[i+np][i+np] = -AA;

    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB; bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2;
  }
}
/*
* SetInitialProfiles: Set initial conditions in cc, cp, and id.
* A polynomial profile is used for the prey cc values, and a constant
* (1.0e5) is loaded as the initial guess for the predator cc values.
* The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and
* the predator cp values are set to zero.
*/
/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey (differential) and 0 for the
 * predators (algebraic). The prey cp values are set by calling Fweb,
 * and the predator cp values are set to zero (corrected by IDACalcIC).
 */
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype loc, yloc, is, jx, jy, np;
  realtype xx, yy, xyfactor;
  realtype *ccv, *cpv, *idv;

  ccv = NV_DATA_OMP(cc);
  cpv = NV_DATA_OMP(cp);
  idv = NV_DATA_OMP(id);
  np = webdata->np;

  /* Loop over grid, load cc values and id values. */
  for (jy = 0; jy < MY; jy++) {
    yy = jy * webdata->dy;
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * webdata->dx;
      /* polynomial bump, zero on the domain boundary, squared */
      xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
      xyfactor *= xyfactor;
      loc = yloc + NUM_SPECIES*jx;

      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np) {
          /* prey: polynomial profile, marked differential */
          ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
          idv[loc+is] = ONE;
        }
        else {
          /* predator: flat guess, marked algebraic */
          ccv[loc+is] = RCONST(1.0e5);
          idv[loc+is] = ZERO;
        }
      }
    }
  }

  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);

  /* Set c' for predators to 0. */
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = np; is < NUM_SPECIES; is++) {
        cpv[loc+is] = ZERO;
      }
    }
  }
}
/*
* Print first lines of output (problem description)
*/
/*
 * PrintHeader: Print problem description, solver parameters, and the
 * column headings for the output table. Precision-specific printf
 * formats are selected at compile time from the SUNDIALS build config.
 */
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol)
{
  printf("\nidaFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDA \n\n");
  printf("Number of species ns: %d", NUM_SPECIES);
  printf("     Mesh dimensions: %d x %d", MX, MY);
  printf("     System size: %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters:  rtol = %Lg   atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters:  rtol = %g   atol = %g\n", rtol, atol);
#endif
  printf("Linear solver: SUNBAND,  Band parameters mu = %ld, ml = %ld\n",
         (long int) mu, (long int) ml);
  printf("CalcIC called to correct initial predator concentrations.\n\n");
  printf("-----------------------------------------------------------\n");
  printf("  t        bottom-left  top-right");
  printf("    | nst  k      h\n");
  printf("-----------------------------------------------------------\n\n");
}
/*
* PrintOutput: Print output values at output time t = tt.
* Selected run statistics are printed. Then values of the concentrations
* are printed for the bottom left and top right grid points only.
*/
/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed. Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;

  /* query last order, step count, and last step size from IDA */
  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);

  /* pointers to the species vectors at the two corner grid points */
  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);

#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le   | %3ld  %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4Le %12.4Le   |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e   | %3ld  %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4e %12.4e   |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e   | %3ld  %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf("         %12.4e %12.4e   |\n",c_bl[i],c_tr[i]);
#endif

  printf("\n");
}
/*
* PrintFinalStats: Print final run data contained in iopt.
*/
/*
 * PrintFinalStats: Print final run statistics gathered from IDA:
 * step count, residual evaluations (solver + linear solver), Jacobian
 * evaluations, nonlinear iterations, and failure counts.
 */
static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, nreLS, nni, nje, netf, ncfn;
  int retval;

  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumNonlinSolvIters(ida_mem, &nni);
  check_retval(&retval, "IDAGetNumNonlinSolvIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn);
  check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1);
  retval = IDAGetNumJacEvals(ida_mem, &nje);
  check_retval(&retval, "IDAGetNumJacEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nreLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);

  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps                    = %ld\n", nst);
  printf("Number of residual evaluations     = %ld\n", nre+nreLS);
  printf("Number of Jacobian evaluations     = %ld\n", nje);
  printf("Number of nonlinear iterations     = %ld\n", nni);
  printf("Number of error test failures      = %ld\n", netf);
  printf("Number of nonlinear conv. failures = %ld\n", ncfn);
}
/*
* Fweb: Rate function for the food-web problem.
* This routine computes the right-hand sides of the system equations,
* consisting of the diffusion term and interaction term.
* The interaction term is computed by the function WebRates.
*/
/*
 * Fweb: Rate function for the food-web problem.
 * This routine computes the right-hand sides of the system equations,
 * consisting of the diffusion term and interaction term.
 * The interaction term is computed by the function WebRates.
 */
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;

  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */
  jx = jy = is = 0;
  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* At a boundary the offset flips sign, so the "outside" neighbor is
       replaced by the interior mirror point (homogeneous Neumann BC). */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0  ) ? NSMX : -NSMX;
    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      idxu = (jx!= MX-1) ?  NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!=  0  ) ?  NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);

      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);

      /* Loop over species, do differencing, load crate segment. */
      /* NOTE(review): this inner parallel region runs over only
         NUM_SPECIES (= 2) iterations per grid point -- confirm whether
         the pragma pays for its overhead here. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {

        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);

        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);

        /* Compute the crate values at (xx,yy): diffusion + reaction. */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
          cox[is] * (dcxui - dcxli) + ratesxy[is];

      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */
}
/*
* WebRates: Evaluate reaction rates at a given spatial point.
* At a given (x,y), evaluate the array of ns reaction terms R.
*/
/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * At a given (x,y), evaluate the array of ns reaction terms R:
 *   R_i = c_i * ( b_i * fac + sum_j a(i,j) * c_j )
 * where fac carries the spatial dependence of the b coefficients.
 */
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata)
{
  int is;
  realtype fac;

  /* ratesxy[is] = (acoef row is) . cxy */
  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]);

  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  for (is = 0; is < NUM_SPECIES; is++)
    ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] );
}
/*
* dotprod: dot product routine for realtype arrays, for use by WebRates.
*/
/*
 * dotprod: dot product routine for realtype arrays, for use by WebRates.
 * Returns sum_{i=0}^{size-1} x1[i]*x2[i].
 */
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype k;
  realtype sum = ZERO;

  for (k = 0; k < size; k++) {
    sum += x1[k] * x2[k];
  }
  return(sum);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
/*
 * Check function return value...
 *   opt == 0 means SUNDIALS function allocates memory so check if
 *            returned NULL pointer
 *   opt == 1 means SUNDIALS function returns an integer value so check if
 *            retval < 0
 *   opt == 2 means function allocates memory so check if returned
 *            NULL pointer
 * Returns 1 (and prints to stderr) on failure, 0 otherwise.
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  /* Integer-return check: report negative values. */
  if (opt == 1) {
    int *retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *retval);
      return(1);
    }
    return(0);
  }

  /* Pointer checks: a non-NULL result is always a success. */
  if (returnvalue != NULL) return(0);

  if (opt == 0) {
    /* SUNDIALS allocator returned NULL - no memory allocated */
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  if (opt == 2) {
    /* Local allocator returned NULL - no memory allocated */
    fprintf(stderr,
            "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  return(0);
}
|
point_outlier.h | /****************************************************************************
* VCGLib o o *
* Visual and Computer Graphics Library o o *
* _ O _ *
* Copyright(C) 2004-2015 \/)\/ *
* Visual Computing Lab /\/| *
* ISTI - Italian National Research Council | *
* \ *
* All rights reserved. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt) *
* for more details. *
* *
****************************************************************************/
#ifndef VCG_TRI_OUTLIERS__H
#define VCG_TRI_OUTLIERS__H
#include <vcg/space/index/kdtree/kdtree.h>
namespace vcg
{
namespace tri
{
template <class MeshType>
class OutlierRemoval
{
public:
typedef typename MeshType::ScalarType ScalarType;
typedef typename vcg::KdTree<ScalarType> KdTreeType;
typedef typename vcg::KdTree<ScalarType>::PriorityQueue PriorityQueue;
/**
Compute an outlier probability value for each vertex of the mesh using the approach
in the paper "LoOP: Local Outlier Probabilities". The outlier probability is stored in the
vertex attribute "outlierScore". It uses the input kdtree to find the kNearest neighbors of each vertex.
"LoOP: local outlier probabilities" by Hans-Peter Kriegel et al.
Proceedings of the 18th ACM conference on Information and knowledge management
*/
static void ComputeLoOPScore(MeshType& mesh, KdTreeType& kdTree, int kNearest)
{
vcg::tri::RequireCompactness(mesh);
typename MeshType::template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore"));
typename MeshType::template PerVertexAttributeHandle<ScalarType> sigma = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("sigma"));
typename MeshType::template PerVertexAttributeHandle<ScalarType> plof = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("plof"));
#pragma omp parallel for schedule(dynamic, 10)
for (size_t i = 0; i < mesh.vert.size(); i++)
{
PriorityQueue queue;
kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue);
ScalarType sum = 0;
for (int j = 0; j < queue.getNofElements(); j++)
sum += queue.getWeight(j);
sum /= (queue.getNofElements());
sigma[i] = sqrt(sum);
}
float mean = 0;
#pragma omp parallel for reduction(+: mean) schedule(dynamic, 10)
for (size_t i = 0; i < mesh.vert.size(); i++)
{
PriorityQueue queue;
kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue);
ScalarType sum = 0;
for (int j = 0; j < queue.getNofElements(); j++)
sum += sigma[queue.getIndex(j)];
sum /= (queue.getNofElements());
plof[i] = sigma[i] / sum - 1.0f;
mean += plof[i] * plof[i];
}
mean /= mesh.vert.size();
mean = sqrt(mean);
#pragma omp parallel for schedule(dynamic, 10)
for (size_t i = 0; i < mesh.vert.size(); i++)
{
ScalarType value = plof[i] / (mean * sqrt(2.0f));
double dem = 1.0 + 0.278393 * value;
dem += 0.230389 * value * value;
dem += 0.000972 * value * value * value;
dem += 0.078108 * value * value * value * value;
ScalarType op = max(0.0, 1.0 - 1.0 / dem);
outlierScore[i] = op;
}
tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("sigma"));
tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("plof"));
};
/**
Select all the vertex of the mesh with an outlier probability above the input threshold [0.0, 1.0].
*/
static int SelectLoOPOutliers(MeshType& mesh, KdTreeType& kdTree, int kNearest, float threshold)
{
ComputeLoOPScore(mesh, kdTree, kNearest);
int count = 0;
typename MeshType:: template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>::template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore"));
for (int i = 0; i < mesh.vert.size(); i++)
{
if (outlierScore[i] > threshold)
{
mesh.vert[i].SetS();
count++;
}
}
return count;
}
/**
Delete all the vertex of the mesh with an outlier probability above the input threshold [0.0, 1.0].
*/
static int DeleteLoOPOutliers(MeshType& m, KdTreeType& kdTree, int kNearest, float threshold)
{
SelectLoOPOutliers(m,kdTree,kNearest,threshold);
int ovn = m.vn;
for(typename MeshType::VertexIterator vi=m.vert.begin();vi!=m.vert.end();++vi)
if((*vi).IsS() ) tri::Allocator<MeshType>::DeleteVertex(m,*vi);
tri::Allocator<MeshType>::CompactVertexVector(m);
tri::Allocator<MeshType>::DeletePerVertexAttribute(m, std::string("outlierScore"));
return m.vn - ovn;
}
};
} // end namespace tri
} // end namespace vcg
#endif // VCG_TRI_OUTLIERS__H
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
/*
  A wand view: a rectangular window (extent) onto the pixels of a magick
  wand's image, iterated through a cache view with one row of pixel wands
  per worker thread.
*/
struct _WandView
{
  size_t
    id;                 /* unique id assigned by AcquireWandId() */

  char
    name[MaxTextExtent],  /* "WandView-<id>", used for logging/validation */
    *description;         /* progress-monitor label; see SetWandViewDescription() */

  RectangleInfo
    extent;             /* region of the image this view covers */

  MagickWand
    *wand;              /* the wand whose images are viewed (not owned) */

  CacheView
    *view;              /* pixel cache view onto wand->images (owned) */

  size_t
    number_threads;     /* rows in pixel_wands; one per OpenMP thread */

  PixelWand
    ***pixel_wands;     /* [thread][x] scanline of pixel wands (owned) */

  ExceptionInfo
    *exception;         /* per-view exception sink (owned) */

  MagickBooleanType
    debug;              /* when true, log wand events */

  size_t
    signature;          /* WandSignature while the view is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  CloneWandView() makes a deep copy of the specified wand view: the cache
  view, exception, description, and per-thread pixel wand table are all
  duplicated; the underlying magick wand is shared (not owned).
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Bug fix: allocate the per-thread pixel wand table before populating it.
    The original left clone_view->pixel_wands NULL (from memset) and then
    wrote through it, dereferencing a null pointer.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    clone_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view->pixel_wands,0,clone_view->number_threads*
    sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Release a per-thread table of pixel wand scanlines: each non-NULL row is
  destroyed, then the table itself is relinquished.  Always returns NULL.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    n;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    if (pixel_wands[n] == (PixelWand **) NULL)
      continue;  /* row was never allocated (partial AcquirePixelsThreadSet) */
    pixel_wands[n]=DestroyPixelWands(pixel_wands[n],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/*
  DestroyWandView() deallocates all memory associated with a wand view and
  returns NULL.  The underlying magick wand is not owned and is left intact.
*/
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  /*
    Bug fix: release the description string allocated by ConstantString();
    the original leaked it on every destroy.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~WandSignature);  /* poison against use-after-free */
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate over the source, duplex, and destination views in parallel, one
  scanline at a time: load each view's row into its per-thread pixel wands,
  invoke the user transfer callback, then sync the (possibly modified)
  destination row back to its pixel cache.  Returns MagickFalse if any row
  fails or the callback aborts.
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  /*
    Bug fix: validate all three views, not just the source; the original
    dereferenced duplex and destination without checking them.
  */
  assert(duplex != (WandView *) NULL);
  assert(duplex->signature == WandSignature);
  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict duplex_indexes,
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /*
      Load the source scanline (virtual pixels: out-of-canvas offsets OK).
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /*
      Load the duplex scanline.
    */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelBlack(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /*
      Load the destination scanline (authentic pixels: writable, in-canvas).
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Write the callback's changes back into the destination pixel cache.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: inherit the exception from the destination view whose
          sync failed; the original read it from the source view, discarding
          the actual error.
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
/*
  Return a newly allocated string describing any error recorded on the wand
  view, and store its severity in *severity.  The caller owns the returned
  string.  The result is empty when no exception has been raised.
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;

  const ExceptionInfo
    *view_exception;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  view_exception=wand_view->exception;
  *severity=view_exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  /* Localized reason first, then the localized detail in parentheses. */
  if (view_exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      view_exception->severity,view_exception->reason),MaxTextExtent);
  if (view_exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        view_exception->severity,view_exception->description),MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewExtent() returns the rectangle (x, y, width, height) this wand
  view covers, by value.
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate over the source view one scanline at a time (parallelized with
  OpenMP when available): each row is read into the calling thread's pixel
  wands and the user 'get' callback is invoked; writes by the callback are
  NOT synced back.  Returns MagickFalse if any row fails or the callback
  aborts.
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* A failure on any row aborts the remaining rows cheaply. */
    if (status == MagickFalse)
      continue;
    /* Virtual pixels: the extent may lie (partly) outside the canvas. */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Stage the row into this thread's pixel wands for the callback. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Return the calling thread's row of pixel wands for this view.  The row is
  owned by the view; do not destroy it.
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  const int
    thread_id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[thread_id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewWand() returns the magick wand this view was created on.  The
  wand is shared, not owned by the view.
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  Return MagickTrue only when the parameter looks like a live wand view:
  non-NULL, carrying the wand signature, and named with the "WandView"
  prefix assigned at creation.
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
/*
  Allocate a table of 'number_threads' rows, each holding 'number_wands'
  pixel wands.  On any failure the partially built table is torn down and
  NULL is returned.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    n;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero the table so a partial failure can be cleaned up safely. */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_wands[n]=NewPixelWands(number_wands);
    if (pixel_wands[n] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}
/*
  Allocate a wand view spanning the full canvas of the wand's current image.
  Fatal (ThrowWandFatalException) on allocation failure.
*/
WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView
    *view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  view=(WandView *) AcquireMagickMemory(sizeof(*view));
  if (view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(view,0,sizeof(*view));
  view->id=AcquireWandId();
  (void) FormatLocaleString(view->name,MaxTextExtent,"%s-%.20g",WandViewId,
    (double) view->id);
  view->description=ConstantString("WandView");
  /* The wand must be attached before the cache view is acquired from it. */
  view->wand=wand;
  view->exception=AcquireExceptionInfo();
  view->view=AcquireVirtualCacheView(view->wand->images,view->exception);
  view->extent.width=wand->images->columns;
  view->extent.height=wand->images->rows;
  view->number_threads=GetOpenMPMaximumThreads();
  view->pixel_wands=AcquirePixelsThreadSet(view->extent.width,
    view->number_threads);
  if (view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  view->debug=IsEventLogging();
  view->signature=WandSignature;
  return(view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
*/
/*
  NewWandViewExtent() returns a wand view covering only the given extent of
  the wand's current image.  Fatal (ThrowWandFatalException) on allocation
  failure.
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: attach the wand BEFORE acquiring the cache view.  The original
    called AcquireVirtualCacheView(wand_view->wand->images,...) while
    wand_view->wand was still NULL from memset(), dereferencing a null
    pointer.
  */
  wand_view->wand=wand;
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
/*
  SetWandViewDescription() associates a description with a wand view; it is
  used as the label reported to progress monitors.  The string is copied.
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  /*
    Bug fix: release the previous description (set by NewWandView() or an
    earlier call); the original overwrote the pointer and leaked it.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate over the destination view one scanline at a time (parallelized
  with OpenMP when available): the user 'set' callback fills the thread's
  pixel wands, which are then synced back into the destination pixel cache.
  The pixels are initially undefined.  Returns MagickFalse if any row fails
  or the callback aborts.
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Writing pixels requires a DirectClass destination. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (destination->extent.height-destination->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict pixels;

    /* A failure on any row aborts the remaining rows cheaply. */
    if (status == MagickFalse)
      continue;
    /* Authentic pixels: writable, confined to the image canvas. */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the callback's pixel wands back into the cache row. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewThreads() sets the number of threads in a thread team.
%
% The format of the SetWandViewThreads method is:
%
% void SetWandViewThreads(WandView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
/*
  SetWandViewThreads() sets the number of threads in the view's thread
  team, capped at the thread resource limit (falling back to the OpenMP
  maximum when the request exceeds it).
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  assert(image_view != (WandView *) NULL);
  /*
    Bug fix: wand views are stamped with WandSignature (see NewWandView());
    the original asserted MagickCoreSignature, which always fails for a
    valid wand view in an assert-enabled build.
  */
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
    image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
%      MagickBooleanType TransferWandViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    Iterate over both views in parallel: read each source scanline into the
    per-thread pixel wands, hand them to the user's transfer callback, then
    write the destination wands back through the destination cache view.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /*
    Destination pixels are written directly, so it must be DirectClass.
  */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /*
      Load the source scanline into the per-thread source pixel wands.
    */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      Seed the destination wands from the source scanline so an identity
      callback transfers the pixels unchanged.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Copy the (possibly updated) destination wands back to the pixel cache.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Fix: the sync failed on destination->view, so inherit the pending
          exception from the destination view (the previous code queried
          source->view), matching SetWandViewIterator's error path.
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
%      MagickBooleanType UpdateWandViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  UpdateWandViewIterator: walk the view scanline by scanline, populate the
  per-thread pixel wands, invoke the user's update callback, and sync any
  changes back to the image through the authentic cache view.
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
UpdateWandViewMethod update,void *context)
{
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdateWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
/* Pixels are modified in place, so the image must be DirectClass. */
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
/* Authentic (writable) pixels: updates below are synced back. */
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
/* Load the scanline into this thread's pixel wands. */
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelBlack(indexes+x));
/* User callback mutates the wands; a MagickFalse return aborts. */
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
/* Write the wand contents back to the pixel cache. */
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
SetPixelBlack(indexes+x,PixelGetBlackQuantum(
source->pixel_wands[id][x]));
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,source->description,progress,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
timing.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
double start_time;   // wall-clock timestamp recorded by get_time(1)
double end_time;     // elapsed seconds computed by get_time(!=1)
float *dirty;        // large scratch buffer streamed through to flush caches
float ressss;        // running sum; keeps the flush loop from being optimized away
int flushsz=100000000;   // element count of the flush buffer (~400 MB of floats)
int num_of_core=8;       // assumed core count -- TODO confirm on the target machine
/*
 * Measure wall-clock time around a region of interest.
 *
 * flag == 1: flush the CPU caches by streaming through a large scratch
 *            buffer, then record the start timestamp.
 * flag != 1: compute and print the elapsed time since the matching
 *            get_time(1) call and release the scratch buffer.
 */
void get_time(int flag){
    if (flag == 1){
        float tttmp[num_of_core];
        /* Fix: tttmp was previously read uninitialized by "+=" (undefined
           behavior).  Zero it explicitly before accumulating. */
        for (int ii = 0; ii < num_of_core; ii++)
            tttmp[ii] = 0.0f;
        /* Fix: calloc zero-fills, so "dirty[dirt] += ..." below no longer
           reads indeterminate malloc'd memory; also check for failure. */
        dirty = (float *)calloc(flushsz, sizeof(float));
        if (dirty == NULL){
            fprintf(stderr, "get_time: flush buffer allocation failed\n");
            start_time = omp_get_wtime();
            return;
        }
        #pragma omp parallel for
        for (int dirt = 0; dirt < flushsz; dirt++){
            dirty[dirt] += dirt%100;
            /* NOTE(review): concurrent unsynchronized writes to tttmp are a
               data race; the sums only exist to defeat dead-code
               elimination, but use a reduction if exact values matter. */
            tttmp[dirt%num_of_core] += dirty[dirt];
        }
        for(int ii =0; ii<num_of_core;ii++){ressss+= tttmp[ii];}
        //printf("flush\n");
        start_time = omp_get_wtime();
    }
    else{
        end_time = omp_get_wtime() - start_time;
        printf("time is : %lf\n", end_time);
        free(dirty);
        dirty = NULL;   /* guard against double free on repeated calls */
    }
}
|
GB_binop__iseq_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int64)
// A*D function (colscale): GB (_AxD__iseq_int64)
// D*A function (rowscale): GB (_DxB__iseq_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int64)
// C=scalar+B GB (_bind1st__iseq_int64)
// C=scalar+B' GB (_bind1st_tran__iseq_int64)
// C=A+scalar GB (_bind2nd__iseq_int64)
// C=A'+scalar GB (_bind2nd_tran__iseq_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij == bij)
// operand and result types consumed by the included kernel templates
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: ISEQ yields 0 or 1, stored as int64_t
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT64 || GxB_NO_ISEQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dead code by design: ISEQ is not one of the accumulable operators listed
// below, so this variant is compiled out for this operator/type combination.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All three matrices are dense, no mask; the loop body comes from the
// included template, specialized via the macros above for ISEQ on int64_t.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B is sliced into (B_ntasks) tasks via B_ek_slicing for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulate the scalar b (type int64_t, passed through p_bwork) into every
// entry of the dense matrix C with the ISEQ operator.
GrB_Info GB (_Cdense_accumb__iseq_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: an unreachable duplicate "return (GrB_SUCCESS) ;" that followed
    // the braced block has been removed (dead code).
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Each column j of A is scaled by D(j,j); result values land in Cx.
GrB_Info GB (_AxD__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Each row i of B is scaled by D(i,i); result values land in Cx.
GrB_Info GB (_DxB__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set-union ewise add; the task list and C_to_* maps were computed by the
// symbolic phase.  The WERK arrays are scratch space freed before returning.
GrB_Info GB (_AaddB__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set-intersection ewise multiply (general case, method 01).
GrB_Info GB (_AemultB_01__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_FLIPPED selects fmult(y,x) vs fmult(x,y) inside the included template.
GrB_Info GB (_AemultB_02__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// ISEQ is commutative (GB_BINOP_FLIP is 0), so only this branch compiles.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The sparse mask M drives the iteration; work is sliced via M_ek_slicing.
GrB_Info GB (_AemultB_03__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Compute Cx [k] = (x == Bx [k]) for every entry present in B, with the
// scalar x bound to the first operand.  Bb is B's bitmap (NULL when B is
// full, in which case every position is present).
GrB_Info GB (_bind1st__iseq_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    const int64_t *Bx = (const int64_t *) Bx_input ;
    const int64_t x = *((const int64_t *) x_input) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            // entry present: apply the operator with x on the left
            const int64_t bkj = Bx [k] ;
            Cx [k] = (x == bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Compute Cx [k] = (Ax [k] == y) for every entry present in A, with the
// scalar y bound to the second operand.  Ab is A's bitmap (NULL when A is
// full, in which case every position is present).
GrB_Info GB (_bind2nd__iseq_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    const int64_t *Ax = (const int64_t *) Ax_input ;
    const int64_t y = *((const int64_t *) y_input) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            // entry present: apply the operator with y on the right
            const int64_t akj = Ax [k] ;
            Cx [k] = (akj == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE after the template include; for this operator A and C
// are both int64_t, so the restored definition happens to be identical
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rawMD4_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* Copyright (c) 2011, 2012 by magnum
*
* Use of Bartavelle's mmx/sse2/intrinsics and reduced binary size by
* magnum in 2011-2012.
*
* OMP added May 2013, JimF
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawMD4;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawMD4);
#else
#include <string.h>
#include "arch.h"
#include "md4.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD4
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-MD4"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD4 " MD4_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4)
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifndef MD4_BUF_SIZ
#define MD4_BUF_SIZ 16
#endif
#define CIPHERTEXT_LENGTH 32
#define DIGEST_SIZE 16
#define BINARY_SIZE DIGEST_SIZE
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define FORMAT_TAG "$MD4$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
/* Self-test vectors: {hash, plaintext} pairs, with and without the "$MD4$"
   tag; the NULL entry terminates the list. */
static struct fmt_tests tests[] = {
{"8a9d093f14f8701df17732b2bb182c74", "password"},
{FORMAT_TAG "6d78785c44ea8dfa178748b245d8c3ae", "magnum" },
{"6d78785c44ea8dfa178748b245d8c3ae", "magnum" },
{FORMAT_TAG "31d6cfe0d16ae931b73c59d7e0c089c0", "" },
{FORMAT_TAG "934eb897904769085af8101ad9dabca2", "John the ripper" },
{FORMAT_TAG "cafbb81fb64d9dd286bc851c4c6e0d21", "lolcode" },
{FORMAT_TAG "585028aa0f794af812ee3be8804eb14a", "123456" },
{FORMAT_TAG "23580e2a459f7ea40f9efa148b63cafb", "12345" },
{FORMAT_TAG "2ae523785d0caf4d2fb557c12016185c", "123456789" },
{FORMAT_TAG "f3e80e83b29b778bc092bf8a7c6907fe", "iloveyou" },
{FORMAT_TAG "4d10a268a303379f224d8852f2d13f11", "princess" },
{FORMAT_TAG "bf75555ca19051f694224f2f5e0b219d", "1234567" },
{FORMAT_TAG "41f92cf74e3d2c3ba79183629a929915", "rockyou" },
{FORMAT_TAG "012d73e0fab8d26e0f4d65e36077511e", "12345678" },
{FORMAT_TAG "0ceb1fd260c35bd50005341532748de6", "abc123" },
{NULL}
};
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD4_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
static ARCH_WORD_32 (*saved_key)[MD4_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[4];
#endif
/* Allocate the shared key/hash buffers, scaling capacity by OpenMP thread
   count (times OMP_SCALE) so each thread gets a batch per crypt_all(). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_32
/* Scalar path: one plaintext buffer and one 16-byte digest per key. */
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
#else
/* SIMD path: interleaved NBKEYS-wide blocks, aligned for vector loads. */
saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*saved_key), MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
}
/* Release the buffers allocated by init().  NOTE(review): MEM_FREE is a
   project macro -- presumably it frees and clears the pointer; verify. */
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
MEM_FREE(saved_len);
#endif
}
/*
 * Accept a hash iff, after the optional "$MD4$" tag, it consists of exactly
 * CIPHERTEXT_LENGTH hex digits and nothing else.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *hash = ciphertext;
	size_t n = 0;

	/* Skip the optional "$MD4$" tag. */
	if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) == 0)
		hash += TAG_LENGTH;

	/* Count leading hex digits (atoi16l maps non-hex to 0x7F). */
	while (atoi16l[ARCH_INDEX(hash[n])] != 0x7F)
		n++;

	/* The digits must span the whole remaining string. */
	return hash[n] == 0 && n == CIPHERTEXT_LENGTH;
}
/*
 * Canonicalize a hash: return it as-is when it already carries the "$MD4$"
 * tag, otherwise prepend the tag into a static buffer.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];

	/* Already canonical. */
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) == 0)
		return ciphertext;

	/* Prepend the tag to the bare 32-digit hash. */
	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH);
	out[TAG_LENGTH + CIPHERTEXT_LENGTH] = 0;
	return out;
}
/* Decode the 32 hex digits into a 16-byte binary digest (little-endian word
   layout; byte-swapped on big-endian hosts).  On SIMD builds the digest is
   additionally "reversed" to match the partially-reversed crypt output. */
static void *get_binary(char *ciphertext)
{
static union {
unsigned long dummy;
unsigned int i[DIGEST_SIZE/sizeof(unsigned int)];
} _out;
unsigned int *out = _out.i;
unsigned int i;
unsigned int temp;
ciphertext += TAG_LENGTH;
for (i=0; i<4; i++)
{
/* Assemble one 32-bit word; the shift pattern swaps hex-digit pairs
   into the little-endian byte order MD4 uses. */
temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;
#if ARCH_LITTLE_ENDIAN
out[i]=temp;
#else
out[i]=JOHNSWAP(temp);
#endif
}
#if SIMD_COEF_32 && defined(REVERSE_STEPS)
md4_reverse(out);
#endif
return out;
}
/* Inverse of get_binary(): rebuild the canonical "$MD4$<hex>" string from a
   stored binary digest (un-reversing SIMD digests first). */
static char *source(char *source, void *binary)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG;
ARCH_WORD_32 b[4];
char *p;
int i, j;
memcpy(b, binary, sizeof(b));
#if SIMD_COEF_32 && defined(REVERSE_STEPS)
md4_unreverse(b);
#endif
#if ARCH_LITTLE_ENDIAN==0
alter_endianity(b, 16);
#endif
p = &out[TAG_LENGTH];
/* (j ^ 1) swaps nibble order within each byte to print little-endian hex. */
for (i = 0; i < 4; i++)
for (j = 0; j < 8; j++)
*p++ = itoa16[(b[i] >> ((j ^ 1) * 4)) & 0xf];
return out;
}
#ifdef SIMD_COEF_32
/* Copy a candidate key into the interleaved SIMD buffer, appending the MD4
   0x80 padding byte and storing the bit length in word 14, so crypt_all()
   can run the compression function without further preparation. */
static void set_key(char *_key, int index)
{
#if ARCH_ALLOWS_UNALIGNED
const ARCH_WORD_32 *key = (ARCH_WORD_32*)_key;
#else
/* Platforms that fault on unaligned loads get an aligned bounce buffer. */
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
const ARCH_WORD_32 *key = (uint32_t*)(is_aligned(_key, sizeof(uint32_t)) ?
_key : strcpy(buf_aligned, _key));
#endif
ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD4_BUF_SIZ*SIMD_COEF_32];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
/* Scan the key a 32-bit word at a time; when the terminating NUL is found
   inside a word, merge the 0x80 padding byte into that same word. */
while((temp = *key++) & 0xff) {
if (!(temp & 0xff00))
{
*keybuf_word = (temp & 0xff) | (0x80 << 8);
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = (temp & 0xffff) | (0x80 << 16);
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = temp | (0x80U << 24);
len+=3;
goto key_cleaning;
}
*keybuf_word = temp;
len += 4;
keybuf_word += SIMD_COEF_32;
}
/* Key length was a multiple of 4: padding byte goes in the next word. */
*keybuf_word = 0x80;
key_cleaning:
/* Zero the remainder of the block left over from a previous longer key. */
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
/* MD4 message length in bits, stored in block word 14. */
keybuffer[14*SIMD_COEF_32] = len << 3;
}
#else
/* Scalar path: stash the key and its length; NUL-termination is deferred
   to get_key(), and MD4_Update() uses the explicit length. */
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
memcpy(saved_key[index], key, len);
}
#endif
#ifdef SIMD_COEF_32
/* Reassemble the plaintext from the interleaved SIMD buffer: recover the
   byte length from the stored bit length, then gather bytes via GETPOS. */
static char *get_key(int index)
{
static char out[PLAINTEXT_LENGTH + 1];
unsigned int i;
ARCH_WORD_32 len = ((ARCH_WORD_32*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*MD4_BUF_SIZ*SIMD_COEF_32] >> 3;
for(i=0;i<len;i++)
out[i] = ((char*)saved_key)[GETPOS(i, index)];
out[i] = 0;
return (char*)out;
}
#else
/* Scalar path: terminate the stored key in place and hand it back. */
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
#endif
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
/* Hash all queued candidates.  With OpenMP each iteration handles one
   MAX_KEYS_PER_CRYPT-sized batch; without it the single pass covers the
   (then at most MAX_KEYS_PER_CRYPT) keys at index 0. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
for (index = 0; index < loops; index++)
#endif
{
#if SIMD_COEF_32
/* Vectorized MD4 over NBKEYS interleaved keys, with reversed final steps. */
SIMDmd4body(saved_key[index], crypt_key[index], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
MD4_CTX ctx;
MD4_Init(&ctx);
MD4_Update(&ctx, saved_key[index], saved_len[index]);
MD4_Final((unsigned char *)crypt_key[index], &ctx);
#endif
}
return count;
}
/* Quick scan: does ANY computed hash match the candidate binary?  Only the
   second 32-bit word is compared here; cmp_one/cmp_exact confirm later. */
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
unsigned int x, y;
#ifdef _OPENMP
const unsigned int c = (count + SIMD_COEF_32 - 1) / SIMD_COEF_32;
#else
const unsigned int c = SIMD_PARA_MD4;
#endif
for(y = 0; y < c; y++)
for(x = 0; x < SIMD_COEF_32; x++)
{
/* +SIMD_COEF_32 offsets to word index 1 of each lane's digest. */
if( ((ARCH_WORD_32*)binary)[1] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x+SIMD_COEF_32] )
return 1;
}
return 0;
#else
unsigned int index = 0;
#ifdef _OPENMP
for (index = 0; index < count; index++)
#endif
if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
return 1;
return 0;
#endif
}
/* Compare the candidate against one specific computed hash (word 1 only on
   SIMD builds; full digest comparison happens in cmp_exact). */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
unsigned int x = index&(SIMD_COEF_32-1);
unsigned int y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)binary)[1] == ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4+SIMD_COEF_32];
#else
return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}
/*
 * Full verification: recompute MD4 of the candidate plaintext on the CPU
 * and compare the complete digest. In the non-SIMD build crypt_all()
 * already produced the full digest, so cmp_one() was conclusive.
 */
static int cmp_exact(char *source, int index)
{
#ifdef SIMD_COEF_32
	ARCH_WORD_32 crypt_key[DIGEST_SIZE / 4];
	MD4_CTX ctx;
	char *key = get_key(index);
	MD4_Init(&ctx);
	MD4_Update(&ctx, key, strlen(key));
	MD4_Final((void*)crypt_key, &ctx);
#ifdef REVERSE_STEPS
	/* Loaded binaries are stored with reversed final steps; match that. */
	md4_reverse(crypt_key);
#endif
	return !memcmp(get_binary(source), crypt_key, DIGEST_SIZE);
#else
	return 1;
#endif
}
#ifdef SIMD_COEF_32
/* Locate hash word 1 for 'index' inside the interleaved SIMD buffer.
 * Expansion is parenthesized so it is safe in any expression context. */
#define SIMD_INDEX ((index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4+SIMD_COEF_32)
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[SIMD_INDEX] & PH_MASK_6; }
#else
/*
 * Non-SIMD: hash-table keys must come from THIS index's digest.
 * Fix: the previous code read word 1 of slot 0 for every index
 * (crypt_key instead of crypt_key[index]), which breaks cracker
 * hash-table lookups whenever more than one key is queued.
 * Word 1 matches binary_hash_*() below.
 */
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key[index])[1] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key[index])[1] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key[index])[1] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key[index])[1] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key[index])[1] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key[index])[1] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key[index])[1] & PH_MASK_6; }
#endif
/* Hash-table keys for loaded binaries: word 1 of the stored digest,
 * mirroring get_hash_*() so lookups land in the same bucket. */
static int binary_hash_0(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_0; }
static int binary_hash_1(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_1; }
static int binary_hash_2(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_2; }
static int binary_hash_3(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_3; }
static int binary_hash_4(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_4; }
static int binary_hash_5(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_5; }
static int binary_hash_6(void * binary) { return ((ARCH_WORD_32*)binary)[1] & PH_MASK_6; }
/*
 * Format registration: the parameters block, then the method table that
 * wires the functions above into the John the Ripper core.
 */
struct fmt_main fmt_rawMD4 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,	/* presumably plaintext_min_length -- confirm against formats.h */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },	/* no tunable costs */
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,	/* raw hash: no salt */
		{ NULL },
		source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,	/* salt_compare */
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
// NOTE: Kernel launch/map was extremely costly.
// Hence, we use separate CUDA kernels for these operators.
template<typename DType, typename T1, typename T2>
void ToTensorImplCUDA(mshadow::Stream<gpu> *s,
const T1 input,
const T2 output,
const int req,
const float normalize_factor);
template<typename DType>
void NormalizeImplCUDA(mshadow::Stream<gpu> *s,
const DType *input,
DType *output,
const int req,
const int N,
const int C,
const int H,
const int W,
const float mean_d0,
const float mean_d1,
const float mean_d2,
const float std_d0,
const float std_d1,
const float std_d2);
template<typename DType>
void NormalizeBackwardImplCUDA(mshadow::Stream<gpu> *s,
const DType *out_grad,
DType *in_grad,
const int req,
const int N,
const int C,
const int H,
const int W,
const float std_d0,
const float std_d1,
const float std_d2);
#endif // MXNET_USE_CUDA
// Shape and Type inference for image to tensor operator
// Shape inference for the image-to-tensor operator: channels-last input
// (H, W, C) or (N, H, W, C) becomes channels-first (C, H, W) or
// (N, C, H, W). Returns false while the input shape is still unknown.
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape &shp = (*in_attrs)[0];
  if (!shape_is_known(shp)) return false;
  const int ndim = shp.ndim();
  CHECK((ndim == 3) || (ndim == 4))
      << "Input image must have shape (height, width, channels), or "
      << "(N, height, width, channels) but got " << shp;
  if (ndim == 3) {
    // (H, W, C) -> (C, H, W)
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({shp[2], shp[0], shp[1]}));
  } else {
    // (N, H, W, C) -> (N, C, H, W); ndim == 4 is guaranteed by the CHECK.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({shp[0], shp[3], shp[1], shp[2]}));
  }
  return true;
}
// Type inference for to_tensor: output dtype is always float32; typing
// succeeds once the input dtype is known (not -1).
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
                         std::vector<int> *in_attrs,
                         std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
  return (*in_attrs)[0] != -1;
}
// Operator Implementation
// Core to_tensor kernel: transpose one image from interleaved HWC to
// planar CHW while dividing every value by normalize_factor (255 in the
// forward op, mapping uint8 pixels into [0, 1]).
// 'length' is H*W; 'step' is this image's flat offset within a batch.
template<typename DType, int req>
inline void ToTensor(float* out_data, const DType* in_data,
                     const int length,
                     const int channels,
                     const float normalize_factor,
                     const int step) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      // Read HWC (i*channels + c), write CHW (c*length + i).
      KERNEL_ASSIGN(out_data[step + c*length + i], req,
                    (in_data[step + i*channels + c]) / normalize_factor);
    }
  }
}
// Dispatch the ToTensor kernel over the runtime input dtype and the
// write-request kind. 'step' is the flat offset of the image to process
// (callers pass n*step for batch element n).
inline void ToTensorImpl(const std::vector<TBlob> &inputs,
                         const std::vector<TBlob> &outputs,
                         const std::vector<OpReqType> &req,
                         const int length,
                         const int channel,
                         const float normalize_factor,
                         const int step) {
  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      float* output = outputs[0].dptr<float>();
      DType* input = inputs[0].dptr<DType>();
      ToTensor<DType, req_type>(output, input, length, channel,
                                normalize_factor, step);
    });
  });
}
// Forward entry point for to_tensor: HWC/NHWC image -> CHW/NCHW float32
// tensor scaled by 1/255. GPU builds dispatch to the CUDA kernel; CPU
// builds use ToTensorImpl per image.
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  // We do not use temp buffer when performance the operation.
  // Hence, this check is necessary.
  CHECK_EQ(req[0], kWriteTo)
      << "`to_tensor` does not support inplace updates";
  const float normalize_factor = 255.0f;  // uint8 range -> [0, 1]
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        if (inputs[0].ndim() == 3) {
          // Single image (H, W, C).
          Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
          Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>>
              (s, input, output, req_type, normalize_factor);
        } else {
          // Batch of images (N, H, W, C).
          Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
          Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>>
              (s, input, output, req_type, normalize_factor);
        }
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D Input - (h, w, c)
    const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
    const int channel = static_cast<int>(inputs[0].shape_[2]);
    const int step = 0;
    ToTensorImpl(inputs, outputs, req, length,
                 channel, normalize_factor, step);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, h, w, c): each batch element handled independently.
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[3]);
    const int step = channel * length;
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      ToTensorImpl(inputs, outputs, req, length, channel,
                   normalize_factor, n*step);
    }
  }
}
// Parameters for the image Normalize operator: per-channel mean and
// standard deviation. A single value broadcasts to all channels.
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
  mxnet::Tuple<float> mean;  // per-channel means (or one broadcast value)
  mxnet::Tuple<float> std;   // per-channel standard deviations
  DMLC_DECLARE_PARAMETER(NormalizeParam) {
    DMLC_DECLARE_FIELD(mean)
    .set_default(mxnet::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
    .describe("Sequence of means for each channel. "
              "Default value is 0.")
    DMLC_DECLARE_FIELD(std)
    .set_default(mxnet::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
    .describe("Sequence of standard deviations for each channel. "
              "Default value is 1.");
  }
};
// Shape and Type inference for image Normalize operator
// Shape inference
// Shape inference for image Normalize: output shape equals input shape.
// Input must be (C, H, W) or (N, C, H, W) with 1 or 3 channels, and
// mean/std must each supply 1 value or one per channel.
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector *in_attrs,
                             mxnet::ShapeVector *out_attrs) {
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  const auto& dshape = (*in_attrs)[0];
  if (!dshape.ndim()) return false;
  CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
      << "Input tensor must have shape (channels, height, width), or "
      << "(N, channels, height, width), but got " << dshape;
  int nchannels = 0;
  if (dshape.ndim() == 3) {
    nchannels = dshape[0];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The first dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  } else if (dshape.ndim() == 4) {
    nchannels = dshape[1];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The second dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  }
  CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
      << "Invalid mean for input with shape " << dshape
      << ". mean must have either 1 or " << nchannels
      << " elements, but got " << param.mean;
  CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
      << "Invalid std for input with shape " << dshape
      << ". std must have either 1 or " << nchannels
      << " elements, but got " << param.std;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Type Inference
// Type inference for Normalize: the dtype propagates both ways between
// input and output (they must match); succeeds once resolved (not -1).
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
                            std::vector<int>* in_attrs,
                            std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
// Core Normalize kernel over one planar (CHW) image:
//   out[c][i] = (in[c][i] - mean[c]) / std[c]
// 'length' is H*W; 'step' is the image's flat offset within a batch.
template<typename DType, int req>
inline void Normalize(DType* out_data,
                      const DType* in_data,
                      const int length,
                      const int channels,
                      const int step,
                      const std::vector<float> mean,
                      const std::vector<float> std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(out_data[step + c*length + i], req,
                    (in_data[step + c*length + i] - mean[c]) / std[c]);
    }
  }
}
// Dispatch the Normalize kernel over the runtime dtype and write-request
// kind for the image at flat offset 'step'.
inline void NormalizeImpl(const std::vector<TBlob> &inputs,
                          const std::vector<TBlob> &outputs,
                          const std::vector<OpReqType> &req,
                          const int length,
                          const int channels,
                          const int step,
                          const std::vector<float> mean,
                          const std::vector<float> std) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* input = inputs[0].dptr<DType>();
      DType* output = outputs[0].dptr<DType>();
      Normalize<DType, req_type>(output, input, length, channels, step,
                                 mean, std);
    });
  });
}
// Forward pass of image Normalize: out = (in - mean[c]) / std[c] per
// channel, for (C, H, W) or (N, C, H, W) input.
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Mean and Std can be 1 or 3D only. Broadcast a single value to all
  // three channel slots; unused slots are harmless for 1-channel input.
  std::vector<float> mean(3);
  std::vector<float> std(3);
  if (param.mean.ndim() == 1) {
    mean[0] = mean[1] = mean[2] = param.mean[0];
  } else {
    mean[0] = param.mean[0];
    mean[1] = param.mean[1];
    mean[2] = param.mean[2];
  }
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *input = nullptr;
        DType *output = nullptr;
        if (inputs[0].ndim() == 3) {
          N = 1;
          C = static_cast<int>(inputs[0].shape_[0]);
          H = static_cast<int>(inputs[0].shape_[1]);
          W = static_cast<int>(inputs[0].shape_[2]);
          input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(inputs[0].shape_[0]);
          C = static_cast<int>(inputs[0].shape_[1]);
          H = static_cast<int>(inputs[0].shape_[2]);
          W = static_cast<int>(inputs[0].shape_[3]);
          input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeImplCUDA<DType>(s, input, output, req_type,
                                 N, C, H, W,
                                 mean[0], mean[1], mean[2],
                                 std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D input (c, h, w)
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[0]);
    const int step = 0;
    NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, c, h, w): normalize each batch element independently.
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
    const int channel = static_cast<int>(inputs[0].shape_[1]);
    const int step = channel * length;
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeImpl(inputs, outputs, req, length, channel, n*step, mean, std);
    }
  }
}
// Backward function
// Backward kernel for Normalize: d(in) = d(out) / std[c] (the mean
// subtraction contributes no gradient).
template<typename DType, int req>
inline void NormalizeBackward(const DType* out_grad,
                              DType* in_grad,
                              const int length,
                              const int channels,
                              const int step,
                              const std::vector<float> std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(in_grad[step + c*length + i], req,
                    out_grad[step + c*length + i] * (1.0 / std[c]));
    }
  }
}
// Dispatch NormalizeBackward over the runtime dtype and write-request
// kind. inputs[0] is the incoming out_grad; outputs[0] receives in_grad.
inline void NormalizeBackwardImpl(const std::vector<TBlob> &inputs,
                                  const std::vector<TBlob> &outputs,
                                  const std::vector<OpReqType> &req,
                                  const int length,
                                  const int channels,
                                  const int step,
                                  const std::vector<float> std
                                  ) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        DType* out_grad = inputs[0].dptr<DType>();
        DType* in_grad = outputs[0].dptr<DType>();
        NormalizeBackward<DType, req_type>(out_grad, in_grad, length,
                                           channels, step, std);
      });
    });
}
// Backward pass of image Normalize: in_grad = out_grad / std[c].
// inputs[0] is out_grad; inputs[1] is the forward input (used only for
// its shape).
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Std can be 1 or 3D only; broadcast a single value to all channels.
  std::vector<float> std(3);
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }
  // Note: inputs[0] is out_grad
  const TBlob& in_data = inputs[1];
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *in_grad = nullptr;
        DType *out_grad = nullptr;
        if (in_data.ndim() == 3) {
          N = 1;
          C = static_cast<int>(in_data.shape_[0]);
          H = static_cast<int>(in_data.shape_[1]);
          W = static_cast<int>(in_data.shape_[2]);
          out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(in_data.shape_[0]);
          C = static_cast<int>(in_data.shape_[1]);
          H = static_cast<int>(in_data.shape_[2]);
          W = static_cast<int>(in_data.shape_[3]);
          out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type,
                                         N, C, H, W,
                                         std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (in_data.ndim() == 3) {
    // 3D input (c, h, w)
    const int length = in_data.shape_[1] * in_data.shape_[2];
    const int channel = static_cast<int>(in_data.shape_[0]);
    const int step = 0;
    NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std);
  } else if (in_data.ndim() == 4) {
    // 4D input (n, c, h, w): each batch element handled independently.
    const int batch_size = in_data.shape_[0];
    const int length = in_data.shape_[2] * in_data.shape_[3];
    const int channel = static_cast<int>(in_data.shape_[1]);
    const int step = channel * length;
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeBackwardImpl(inputs, outputs, req, length, channel, n*step, std);
    }
  }
}
// Convert a float to DType. The generic version truncates via
// static_cast; the uint8_t specialization clamps to [0, 255] first so
// out-of-range pixel values saturate instead of wrapping.
template<typename DType>
inline DType saturate_cast(const float& src) {
  return static_cast<DType>(src);
}
template<>
inline uint8_t saturate_cast(const float& src) {
  const float clamped_low = std::max(src, 0.f);
  return std::min(clamped_low, 255.f);
}
// Shape inference shared by the single-image augmenters: input must be
// a 3-D HWC image with 1 or 3 channels; output shape equals input shape.
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
                       mxnet::ShapeVector *in_attrs,
                       mxnet::ShapeVector *out_attrs) {
  mxnet::TShape& dshape = (*in_attrs)[0];
  CHECK_EQ(dshape.ndim(), 3)
      << "Input image must have shape (height, width, channels), but got " << dshape;
  const auto nchannels = dshape[dshape.ndim()-1];
  const bool channel_ok = (nchannels == 3) || (nchannels == 1);
  CHECK(channel_ok)
      << "The last dimension of input image must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Reverse a tensor along compile-time 'axis' by swapping mirrored pairs.
// head = product of dims before axis, tail = product after; supports
// in-place operation (src == dst).
// NOTE(review): when src != dst and shape[axis] is odd, the middle slice
// is never written to dst -- callers appear to rely on in-place use;
// confirm before calling out of place with odd extents.
template<typename DType, int axis>
void FlipImpl(const mxnet::TShape &shape, DType *src, DType *dst) {
  int head = 1, mid = shape[axis], tail = 1;
  for (int i = 0; i < axis; ++i) head *= shape[i];
  for (int i = axis+1; i < shape.ndim(); ++i) tail *= shape[i];
  for (int i = 0; i < head; ++i) {
    for (int j = 0; j < (mid >> 1); ++j) {
      // idx1/idx2 are mirrored positions along 'axis'.
      int idx1 = (i*mid + j) * tail;
      int idx2 = idx1 + (mid-(j << 1)-1) * tail;
      for (int k = 0; k < tail; ++k, ++idx1, ++idx2) {
        DType tmp = src[idx1];
        dst[idx1] = src[idx2];
        dst[idx2] = tmp;
      }
    }
  }
}
// Flip an HWC image horizontally (mirror along the width axis, axis 1).
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Flip an HWC image vertically (mirror along the height axis, axis 0).
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Flip the image horizontally with probability 1/2 (default-constructed
// std::bernoulli_distribution); otherwise pass the input through,
// copying only when input and output buffers differ.
inline void RandomFlipLeftRight(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
      // No flip drawn: identity copy (skipped for in-place buffers).
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      // Mirror along the width axis (axis 1 of HWC).
      FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Flip the image vertically with probability 1/2 (default-constructed
// std::bernoulli_distribution); otherwise pass the input through,
// copying only when input and output buffers differ.
inline void RandomFlipTopBottom(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
      // No flip drawn: identity copy (skipped for in-place buffers).
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      // Mirror along the height axis (axis 0 of HWC).
      FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Shared parameter struct for the random enhancement operators
// (brightness/contrast/saturation/hue): the factor is drawn uniformly
// from [min_factor, max_factor].
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
  float min_factor;  // lower bound of the sampled factor (>= 0)
  float max_factor;  // upper bound of the sampled factor (>= 0)
  DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
    DMLC_DECLARE_FIELD(min_factor)
    .set_lower_bound(0.0)
    .describe("Minimum factor.");
    DMLC_DECLARE_FIELD(max_factor)
    .set_lower_bound(0.0)
    .describe("Maximum factor.");
  }
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
// Apply a brightness factor drawn uniformly from
// [min_factor, max_factor].
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  float alpha_b = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs);
}
// Blend every pixel with the image's mean grayscale value:
//   out = in * alpha_c + (1 - alpha_c) * gray_mean
// alpha_c == 1 is identity; alpha_c == 0 collapses to a flat gray image.
inline void AdjustContrastImpl(const float& alpha_c,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  static const float coef[] = { 0.299f, 0.587f, 0.114f };  // BT.601 luma weights
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    // Mean grayscale over the whole image (luma for RGB, raw for gray).
    float sum = 0.f;
    if (nchannels > 1) {
      for (int l = 0; l < length; ++l) {
        for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
      }
    } else {
      for (int l = 0; l < length; ++l) sum += input[l];
    }
    float gray_mean = sum / static_cast<float>(length);
    float beta = (1 - alpha_c) * gray_mean;
    for (int l = 0; l < length * nchannels; ++l) {
      float val = input[l] * alpha_c + beta;
      output[l] = saturate_cast<DType>(val);
    }
  });
}
// Apply a contrast factor drawn uniformly from [min_factor, max_factor].
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha_c = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs);
}
// Blend each pixel with its own grayscale (luma) value:
//   out = gray * (1 - alpha_s) + in * alpha_s
// alpha_s == 1 is identity; alpha_s == 0 fully desaturates.
// Fix: the luma accumulation used '=' instead of '+=', so 'gray' held
// only the blue term (0.114*B) rather than the BT.601 weighted sum --
// compare the equivalent accumulation in AdjustContrastImpl.
inline void AdjustSaturationImpl(const float& alpha_s,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  static const float coef[] = { 0.299f, 0.587f, 0.114f };  // BT.601 luma weights
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  float alpha_o = 1.f - alpha_s;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    if (nchannels == 1) {
      // Grayscale images have no saturation to adjust; copy through.
      for (int l = 0; l < length; ++l) output[l] = input[l];
      return;
    }
    for (int l = 0; l < length; ++l) {
      float gray = 0.f;
      for (int c = 0; c < 3; ++c) {
        gray += input[l*3 + c] * coef[c];
      }
      gray *= alpha_o;
      for (int c = 0; c < 3; ++c) {
        float val = gray + input[l*3 + c] * alpha_s;
        output[l*3 + c] = saturate_cast<DType>(val);
      }
    }
  });
}
// Apply a saturation factor drawn uniformly from
// [min_factor, max_factor].
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha_s = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs);
}
// Convert one RGB pixel (components in [0, 255]) to HLS:
// hue in [0, 360) degrees, lightness and saturation in [0, 1].
// Achromatic pixels (max ~= min) get hue = saturation = 0.
inline void RGB2HLSConvert(const float& src_r,
                           const float& src_g,
                           const float& src_b,
                           float *dst_h,
                           float *dst_l,
                           float *dst_s) {
  // Scale into [0, 1].
  const float r = src_r / 255.f;
  const float g = src_g / 255.f;
  const float b = src_b / 255.f;
  const float vmax = std::fmax(std::fmax(r, g), b);
  const float vmin = std::fmin(std::fmin(r, g), b);
  const float chroma = vmax - vmin;
  const float l = (vmax + vmin) * 0.5f;
  float h = 0.f;
  float s = 0.f;
  if (chroma > std::numeric_limits<float>::epsilon()) {
    // Saturation denominator depends on the lightness half.
    if (l < 0.5f) {
      s = chroma / (vmax + vmin);
    } else {
      s = chroma / (2.0f - vmax - vmin);
    }
    const float scale = 60.f / chroma;
    if (vmax == r) {
      h = (g - b) * scale;
    } else if (vmax == g) {
      h = (b - r) * scale + 120.f;
    } else {
      h = (r - g) * scale + 240.f;
    }
    if (h < 0.f) h += 360.f;
  }
  *dst_h = h;
  *dst_l = l;
  *dst_s = s;
}
// Convert one HLS pixel (hue in degrees, l/s in [0, 1]) back to RGB
// with components scaled to [0, 255].
inline void HLS2RGBConvert(const float& src_h,
                           const float& src_l,
                           const float& src_s,
                           float *dst_r,
                           float *dst_g,
                           float *dst_b) {
  // For each 60-degree hue sector: which tab[] entry feeds b, g, r.
  static const int c_HlsSectorData[6][3] = {
    { 1, 3, 0 },
    { 1, 0, 2 },
    { 3, 0, 1 },
    { 0, 2, 1 },
    { 0, 1, 3 },
    { 2, 1, 0 }
  };
  const float l = src_l;
  const float s = src_s;
  float h = src_h;
  // Zero saturation means pure gray: all channels equal lightness.
  float r = l, g = l, b = l;
  if (s != 0) {
    float p2;
    if (l <= 0.5f) {
      p2 = l * (1 + s);
    } else {
      p2 = l + s - l * s;
    }
    const float p1 = 2 * l - p2;
    h *= 1.f / 60.f;  // degrees -> sector units
    // Wrap hue into [0, 6).
    while (h < 0) h += 6;
    while (h >= 6) h -= 6;
    const int sector = static_cast<int>(h);
    h -= sector;  // fractional position within the sector
    float tab[4];
    tab[0] = p2;
    tab[1] = p1;
    tab[2] = p1 + (p2 - p1) * (1 - h);
    tab[3] = p1 + (p2 - p1) * h;
    b = tab[c_HlsSectorData[sector][0]];
    g = tab[c_HlsSectorData[sector][1]];
    r = tab[c_HlsSectorData[sector][2]];
  }
  *dst_b = b * 255.f;
  *dst_g = g * 255.f;
  *dst_r = r * 255.f;
}
// Shift every pixel's hue by alpha * 360 degrees via an RGB -> HLS ->
// RGB round trip.
// NOTE(review): single-channel images return WITHOUT copying input to
// output -- safe only when the caller runs in place or copies first;
// confirm against the callers' buffer handling.
inline void AdjustHueImpl(float alpha,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  if (inputs[0].shape_[2] == 1) return;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* input = inputs[0].dptr<DType>();
    DType* output = outputs[0].dptr<DType>();
    for (int i = 0; i < length; ++i) {
      float h, l, s;
      float r = static_cast<float>(*(input++));
      float g = static_cast<float>(*(input++));
      float b = static_cast<float>(*(input++));
      RGB2HLSConvert(r, g, b, &h, &l, &s);
      h += alpha * 360.f;  // alpha is a fraction of a full hue rotation
      HLS2RGBConvert(h, l, s, &r, &g, &b);
      *(output++) = saturate_cast<DType>(r);
      *(output++) = saturate_cast<DType>(g);
      *(output++) = saturate_cast<DType>(b);
    }
  });
}
// Apply a hue shift drawn uniformly from [min_factor, max_factor]
// (interpreted by AdjustHueImpl as a fraction of a full rotation).
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
inline void RandomHue(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustHueImpl(alpha, ctx, inputs, req, outputs);
}
// Parameters for random color jitter: each field is the half-width of
// the uniform jitter range for the corresponding property; 0 disables
// that stage.
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
  float brightness;  // jitter half-width for brightness
  float contrast;    // jitter half-width for contrast
  float saturation;  // jitter half-width for saturation
  float hue;         // jitter half-width for hue
  DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
    DMLC_DECLARE_FIELD(brightness)
    .describe("How much to jitter brightness.");
    DMLC_DECLARE_FIELD(contrast)
    .describe("How much to jitter contrast.");
    DMLC_DECLARE_FIELD(saturation)
    .describe("How much to jitter saturation.");
    DMLC_DECLARE_FIELD(hue)
    .describe("How much to jitter hue.");
  }
};
// Apply brightness, contrast, saturation, and hue jitter in a random
// order. 'flag' tracks whether outputs already holds intermediate data,
// so later stages chain off the previous stage's result.
// Fix: "&param" was mojibake-corrupted to "¶m" (HTML entity), which
// does not compile.
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  int order[4] = {0, 1, 2, 3};
  std::shuffle(order, order + 4, prnd->GetRndEngine());
  bool flag = false;  // true once outputs contains partially-jittered data
  for (int i = 0; i < 4; ++i) {
    switch (order[i]) {
      case 0:
        if (param.brightness > 0) {
          float alpha_b = 1.0 + std::uniform_real_distribution<float>(
              -param.brightness, param.brightness)(prnd->GetRndEngine());
          AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 1:
        if (param.contrast > 0) {
          float alpha_c = 1.0 + std::uniform_real_distribution<float>(
              -param.contrast, param.contrast)(prnd->GetRndEngine());
          AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 2:
        if (param.saturation > 0) {
          float alpha_s = 1.f + std::uniform_real_distribution<float>(
              -param.saturation, param.saturation)(prnd->GetRndEngine());
          AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 3:
        if (param.hue > 0) {
          float alpha_h = std::uniform_real_distribution<float>(
              -param.hue, param.hue)(prnd->GetRndEngine());
          AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
    }
  }
}
// Parameters for deterministic PCA-lighting adjustment: one alpha per
// RGB channel.
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
  mxnet::Tuple<float> alpha;  // PCA noise coefficients for R, G, B
  DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
    DMLC_DECLARE_FIELD(alpha)
    .describe("The lighting alphas for the R, G, B channels.");
  }
};
// Parameters for random PCA lighting: alphas are drawn from a normal
// distribution with this standard deviation.
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
  float alpha_std;  // stddev of the Gaussian lighting noise
  DMLC_DECLARE_PARAMETER(RandomLightingParam) {
    DMLC_DECLARE_FIELD(alpha_std)
    .set_default(0.05)
    .describe("Level of the lighting noise.");
  }
};
// Add AlexNet-style PCA lighting noise: each channel is offset by the
// dot product of 'alpha' with a row of the (eigenvalue-scaled)
// eigenvector table below. Single-channel images are left untouched.
// NOTE(review): the early return for channels == 1 does not copy input
// to output -- presumably relies on in-place use; confirm with callers.
inline void AdjustLightingImpl(const mxnet::Tuple<float>& alpha,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  // Rows: R, G, B. Each entry is eigenvalue * eigenvector component.
  static const float eig[3][3] = {
    { 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 },
    { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
    { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
  };
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int channels = inputs[0].shape_[2];
  if (channels == 1) return;
  // Per-channel offsets, constant across the whole image.
  float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
  float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
  float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    for (int i = 0; i < length; i++) {
      int base_ind = 3 * i;
      float in_r = static_cast<float>(input[base_ind]);
      float in_g = static_cast<float>(input[base_ind + 1]);
      float in_b = static_cast<float>(input[base_ind + 2]);
      output[base_ind] = saturate_cast<DType>(in_r + pca_r);
      output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
      output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
    }
  });
}
// Deterministic lighting op entry point: forwards the user-supplied
// per-channel alphas straight to the shared implementation.
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const AdjustLightingParam &cfg = nnvm::get<AdjustLightingParam>(attrs.parsed);
  AdjustLightingImpl(cfg.alpha, ctx, inputs, req, outputs);
}
// Random lighting op entry point: draws one PCA-lighting alpha per RGB
// channel from N(0, alpha_std) using the op's random resource, then
// delegates the pixel work to AdjustLightingImpl.
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomLightingParam &cfg = nnvm::get<RandomLightingParam>(attrs.parsed);
  Stream<cpu> *stream = ctx.get_stream<cpu>();
  Random<cpu> *rng = ctx.requested[0].get_random<cpu, float>(stream);
  std::normal_distribution<float> noise(0, cfg.alpha_std);
  // Draw in R, G, B order so the RNG stream matches the original op.
  float alpha_r = noise(rng->GetRndEngine());
  float alpha_g = noise(rng->GetRndEngine());
  float alpha_b = noise(rng->GetRndEngine());
  AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs);
}
// Registers a single-input/single-output image augmentation operator with
// common boilerplate: in-place capable (input 0 may alias output 0),
// identity shape inference, elementwise type inference, and a pass-through
// ("_copy") gradient.
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ImageShape) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \
.add_argument("data", "NDArray-or-Symbol", "The input.")
// Same as above, but additionally requests a random-number resource for
// the stochastic augmentations (RandomLighting, RandomColorJitter, ...).
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
MXNET_REGISTER_IMAGE_AUG_OP(name) \
.set_attr<FResourceRequest>("FResourceRequest", \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
})
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "covariance.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*covariance.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
/* Fill the 1400x1200 data matrix with data[r][c] = r*c/1200 and record the
 * dataset size n as a double in *float_n. (m is unused, kept for the
 * PolyBench calling convention.) */
static void init_array(int m, int n, double *float_n, double data[1400][1200]) {
  *float_n = (double) n;
  for (int row = 0; row < 1400; row++) {
    for (int col = 0; col < 1200; col++) {
      data[row][col] = ((double) row * col) / 1200;
    }
  }
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
/* Dump the m x m covariance matrix to stderr, 20 values per line, wrapped
 * in PolyBench's BEGIN/END markers so the output can be diffed against a
 * reference run (prevents dead-code elimination of the kernel). */
static void print_array(int m, double cov[1200][1200]) {
  int i, j;
  fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
  fprintf(stderr, "begin dump: %s", "cov");
  for(i = 0; i < m; i++)
    for(j = 0; j < m; j++) {
      if((i * m + j) % 20 == 0) fprintf(stderr, "\n");
      fprintf(stderr, "%0.2lf ", cov[i][j]);
    }
  fprintf(stderr, "\nend dump: %s\n", "cov");
  fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
/* Covariance kernel: cov[i][j] = sum_k data[k][i]*data[k][j] / (float_n - 1)
 * after centering each of the m columns of data around its mean.
 * data (n x m, stored in a 1400x1200 buffer) is modified in place. */
static void kernel_covariance(int m, int n, double float_n, double data[1400][1200], double cov[1200][1200], double mean[1200]) {
  int i, j, k;
  /* Column means: each thread owns one column j, so mean[j] is race-free. */
  #pragma omp parallel for default(shared) private(j, i) firstprivate(m, n, float_n, data)
  for(j = 0; j < m; j++) {
    mean[j] = 0.0;
    // #pragma omp parallel for default(shared) private(i) firstprivate(n, j, data) reduction(+ : mean[j])
    for(i = 0; i < n; i++)
      mean[j] += data[i][j];
    mean[j] /= float_n;
  }
  /* Center the data: each thread owns a distinct row i. */
  #pragma omp parallel for default(shared) private(i, j) firstprivate(n, m, mean)
  for(i = 0; i < n; i++) {
    // #pragma omp parallel for default(shared) private(j) firstprivate(m, i, mean)
    for(j = 0; j < m; j++)
      data[i][j] -= mean[j];
  }
  /* Upper triangle + symmetric mirror. Thread i writes row elements
   * cov[i][j] (j >= i) and column elements cov[j][i] (j >= i); distinct
   * values of i never touch the same element, so no race. */
  #pragma omp parallel for default(shared) private(i, j, k) firstprivate(m, n, float_n, data)
  for(i = 0; i < m; i++) {
    // #pragma omp parallel for default(shared) private(j, k) firstprivate(i, m, n, float_n, data)
    for(j = i; j < m; j++) {
      cov[i][j] = 0.0;
      // #pragma omp parallel for default(shared) private(k) firstprivate(n, i, j, data) reduction(+ : cov[i][j])
      for(k = 0; k < n; k++)
        cov[i][j] += data[k][i] * data[k][j];
      cov[i][j] /= (float_n - 1.0);
      cov[j][i] = cov[i][j];
    }
  }
}
int main(int argc, char **argv) {
  /*Retrieve problem size.*/
  int n = 1400;  /* number of observations (rows of data) */
  int m = 1200;  /* number of variables (columns; cov is m x m) */
  /*Variable declaration/allocation.*/
  double float_n;
  double (*data)[1400][1200];
  data = (double (*)[1400][1200]) polybench_alloc_data((1400 + 0) * (1200 + 0), sizeof(double));
  ;
  double (*cov)[1200][1200];
  cov = (double (*)[1200][1200]) polybench_alloc_data((1200 + 0) * (1200 + 0), sizeof(double));
  ;
  double (*mean)[1200];
  mean = (double (*)[1200]) polybench_alloc_data(1200 + 0, sizeof(double));
  ;
  /*Initialize array(s).*/
  init_array(m, n, &float_n, *data);
  /*Start timer.*/
  ;
  /*Run kernel.*/
  kernel_covariance(m, n, float_n, *data, *cov, *mean);
  /*Stop and print timer.*/
  ;
  ;
  /*Prevent dead-code elimination. All live-out data must be printed
  by the function call in argument.*/
  /* The condition is (intentionally) almost never true at runtime, but the
   * compiler cannot prove it, so the kernel's results stay live. */
  if(argc > 42 && !strcmp(argv[0], "")) print_array(m, *cov);
  /*Be clean.*/
  free((void *) data);
  ;
  free((void *) cov);
  ;
  free((void *) mean);
  ;
  return 0;
}
|
clipperz_srp_fmt_plug.c | /* This software was repurposed by Dhiru Kholia (dhiru at openwall.com)
* in 2012.
*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2012. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2012 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
 * Format was busted, just like wow-srp. It was ONLY handling the binary residue
 * if the residue was exactly 64 hex bytes long. Well, for exponentiation, it
 * does not have to be 64 bytes; it can be shorter. We also handle the case where
 * a shorter result number is left-padded with zeros to an even 64 bytes. split()
 * should be added to canonicalize these hashes, since they are the same hash
 * with multiple representations.
*
* This implements the SRP protocol, with Clipperz documented
* implementation specifics.
*
* s = random salt value.
*
* v is the 'verifier' value (256 bit value).
*
* Clipperz's offline database has following relevant fields,
*
* <script>_clipperz_dump_data_ = { ...
*
* '2f2134e38b23534adfcd43c2f7223caf3a53a8db7ce800f1e918e8e0d06b8b7a': {
* s: 'e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32',
* v: 'e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f
* version: '0.2',
* ...
* }
* P algorithm:
* h1 = hashlib.sha256(password + username).digest()
* P = h2 = hashlib.sha256(h1).hexdigest()
*
* x algorithm:
* x1 = hashlib.sha256(s + P).digest()
* x = hashlib.sha256(x1).hexdigest()
*
* v algorithm:
* v = Clipperz.Crypto.SRP.g().powerModule(new Clipperz.Crypto.BigInt(x,16),Clipperz.Crypto.SRP.n());
* n = 125617018995153554710546479714086468244499594888726646874671447258204721048803
* g = 2 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_clipperz;
#elif FMT_REGISTERS_H
john_register_one(&fmt_clipperz);
#else
#if AC_BUILT
/* need to know if HAVE_LIBGMP is set, for autoconfig build */
#include "autoconfig.h"
#endif
#include <string.h>
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#ifdef HAVE_LIBGMP
#if HAVE_GMP_GMP_H
#include <gmp/gmp.h>
#else
#include <gmp.h>
#endif
#define EXP_STR " GMP-exp"
#else
#include <openssl/bn.h>
#define EXP_STR " oSSL-exp"
#endif
#include "johnswap.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Clipperz"
#define FORMAT_NAME "SRP"
#define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR EXP_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CLIPPERZSIG "$clipperz$"
#define CLIPPERZSIGLEN 10
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 65
#define BINARY_SIZE 33
#define BINARY_ALIGN 4
#define FULL_BINARY_SIZE 33
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 1
#define USERNAMELEN 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 4
#define SZ 128
// salt is in hex (salt and salt2)
// Self-test vectors: { ciphertext, plaintext }. The 2nd and 3rd entries are
// the same hash written with and without a leading '0' in the verifier
// field, exercising split() canonicalization.
static struct fmt_tests tests[] = {
	{CLIPPERZSIG"e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f$e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32*hackme@mailinator.com", "openwall"},
	{"$clipperz$05b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"},
	{"$clipperz$5b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"},
	{NULL}
};
#ifdef HAVE_LIBGMP
typedef struct t_SRP_CTX {
mpz_t z_mod, z_base, z_exp, z_rop;
} SRP_CTX;
#else
typedef struct t_SRP_CTX {
BIGNUM *z_mod, *z_base, *z_exp, *z_rop;
BN_CTX *BN_ctx;
}SRP_CTX;
#endif
static SRP_CTX *pSRP_CTX;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
// BN_bn2bin sometimes tries to write 33 bytes, hence allow some padding!
// that is because these are mod 0x115B8B692E0E045692CF280B436735C77A5A9E8A9E7ED56C965F87DB5B2A2ECE3
// which is a 65 hex digit number (33 bytes long).
static ARCH_WORD_32 (*crypt_out)[(FULL_BINARY_SIZE/4) + 1];
static struct custom_salt {
unsigned char saved_salt[SZ];
unsigned char user_id[SZ];
} *cur_salt;
#ifdef HAVE_LIBGMP
static int max_keys_per_crypt;
#endif
/* One-time format setup: scale keys-per-crypt by the OpenMP thread count,
 * allocate per-candidate buffers, and pre-initialize one bignum context per
 * candidate with the Clipperz SRP modulus n and generator g = 2. */
static void init(struct fmt_main *self)
{
	int i;
#if defined (_OPENMP)
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	pSRP_CTX = mem_calloc_align(sizeof(*pSRP_CTX), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#ifdef HAVE_LIBGMP
	/* Remembered so done() can mpz_clear() exactly what was initialized. */
	max_keys_per_crypt = self->params.max_keys_per_crypt;
#endif
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
		mpz_init_set_str(pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803", 10);
		mpz_init_set_str(pSRP_CTX[i].z_base, "2", 10);
		mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10);
		mpz_init(pSRP_CTX[i].z_rop);
		// Now, properly initialized mpz_exp, so it is 'large enough' to hold any SHA256 value
		// we need to put into it. Then we simply need to copy in the data, and possibly set
		// the limb count size.
		mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159);
#else
		pSRP_CTX[i].z_mod=BN_new();
		BN_dec2bn(&pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803");
		pSRP_CTX[i].z_base=BN_new();
		BN_set_word(pSRP_CTX[i].z_base, 2);
		pSRP_CTX[i].z_exp=BN_new();
		pSRP_CTX[i].z_rop=BN_new();
		pSRP_CTX[i].BN_ctx = BN_CTX_new();
#endif
	}
}
/* Release everything allocated in init().
 * NOTE(review): the OpenSSL (#else) path's BIGNUMs and BN_CTX are never
 * freed here -- they leak at shutdown; confirm whether that is acceptable. */
void done(void)
{
#ifdef HAVE_LIBGMP
	int i;
	for (i = 0; i < max_keys_per_crypt; ++i) {
		mpz_clear(pSRP_CTX[i].z_mod);
		mpz_clear(pSRP_CTX[i].z_base);
		mpz_clear(pSRP_CTX[i].z_exp);
		mpz_clear(pSRP_CTX[i].z_rop);
	}
#endif
	MEM_FREE(pSRP_CTX);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate a hash of the form "$clipperz$<hex v>$<salt>*<user>".
 * Returns 1 if well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p = NULL;
	if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += CLIPPERZSIGLEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* verifier v, in hex */
		goto err;
	if (strlen(p) > CIPHERTEXT_LENGTH)	/* at most 65 hex digits */
		goto err;
	if (!ishex_oddOK(p))	/* odd digit count allowed (leading 0 dropped) */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (strlen(p) > SZ-1)	/* must fit cs.saved_salt incl. NUL */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* user id */
		goto err;
	if (strlen(p) > SZ-1)	/* must fit cs.user_id incl. NUL */
		goto err;
	if ((p = strtokm(NULL, "*")))	/* reject trailing fields */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Canonicalize a hash: lowercase the hex verifier field and strip its
 * leading zeros, so equivalent representations of the same residue compare
 * equal (paired with FMT_SPLIT_UNIFIES_CASE in the format flags). */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt) {
	static char ct[128+2*SZ+1];
	char *cp;
	if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN))
		return ciphertext;
	strnzcpy(ct, ciphertext, sizeof(ct));
	cp = strchr(&ct[CLIPPERZSIGLEN], '$');
	if (!cp)
		return ciphertext;
	*cp = 0;	/* temporarily terminate so only the verifier is lowercased */
	strlwr(&ct[CLIPPERZSIGLEN]);
	*cp = '$';
	if (ct[CLIPPERZSIGLEN] == '0') {
		/* Shift the rest of the string left over leading zeros
		 * (the loop also copies the terminating NUL). */
		char *cpi = &ct[CLIPPERZSIGLEN];
		char *cpo = cpi;
		while (*cpi == '0')
			++cpi;
		do {
			*cpo++ = *cpi;
		} while (*cpi++);
	}
	return ct;
}
/* Convert the hex verifier field into FULL_BINARY_SIZE bytes, most
 * significant byte first. Leading zeros are skipped (canonical form), and
 * an odd digit count is handled by treating the first digit as a lone
 * low nibble. Unfilled trailing bytes stay zero from the memset. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[FULL_BINARY_SIZE];
		ARCH_WORD_32 dummy[1];	/* force 32-bit alignment for cmp_* */
	} buf;
	unsigned char *out = buf.c;
	char *p, *q;
	int i;
	p = &ciphertext[CLIPPERZSIGLEN];
	q = strchr(p, '$');	/* end of the verifier field */
	memset(buf.c, 0, sizeof(buf));
	while (*p == '0')
		++p;
	if ((q-p)&1) {
		/* odd number of digits: first byte is a single nibble */
		out[0] = atoi16[ARCH_INDEX(*p)];
		++p;
	} else {
		out[0] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	for (i = 1; i < FULL_BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
		/* after the parity fix above the remaining digit count is even,
		 * so p lands exactly on q when the field is exhausted */
		if (p >= q)
			break;
	}
	return out;
}
/* Extract the salt (hex string between '$' and '*') and the user id
 * (everything after '*') into a static custom_salt. cs is zeroed first,
 * so the length-bounded strncpy still yields a NUL-terminated string;
 * field lengths were already bounded to SZ-1 by valid(). */
static void *get_salt(char *ciphertext)
{
	char *p;
	char *q;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	p = ciphertext;
	p = strchr(&ciphertext[CLIPPERZSIGLEN], '$') + 1;
	q = strrchr(ciphertext, '*');
	strncpy((char*)cs.saved_salt, p, q - p);
	p = strrchr(ciphertext, '*') + 1;
	strcpy((char*)cs.user_id, p);
	return (void *)&cs;
}
/* Partial hash functions for the cracker's lookup tables: progressively
 * wider low-bit masks of the first 32-bit word of the computed residue. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Hash a salt (treated as a NUL-terminated byte string) into the
 * [0, SALT_HASH_SIZE) bucket range: shift-accumulate each byte, folding
 * overflow bits back in whenever the value outgrows SALT_HASH_LOG bits. */
static int salt_hash(void *salt)
{
	unsigned int h = 0;
	const unsigned char *cp = (const unsigned char *)salt;

	while (*cp) {
		h = (h << 1) + *cp++;
		if (h >> SALT_HASH_LOG) {
			h ^= h >> SALT_HASH_LOG;
			h &= (SALT_HASH_SIZE - 1);
		}
	}
	h ^= h >> SALT_HASH_LOG;
	h &= (SALT_HASH_SIZE - 1);
	return h;
}
/* Install the current salt (pointer into cracker-owned storage; no copy). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
}
/* Return the stored candidate (for status display / pot file). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Expand len raw bytes into 2*len lowercase hex digits in out.
 * No NUL terminator is appended. */
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	unsigned char *o = out;
	int idx;

	for (idx = 0; idx < len; ++idx) {
		*o++ = itoa16[str[idx] >> 4];
		*o++ = itoa16[str[idx] & 0xF];
	}
}
/* Compute the Clipperz SRP verifier for each candidate password:
 *   P = sha256(sha256(password || user_id)), hex-encoded
 *   x = sha256(sha256(salt_hex || P_hex))
 *   v = 2^x mod n
 * The residue's bytes are stored most-significant-first at the start of
 * crypt_out[j] (remainder pre-zeroed). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (j = 0; j < count; ++j) {
		SHA256_CTX ctx;
		unsigned char Tmp[32];
		unsigned char TmpHex[64];
		memset(crypt_out[j], 0, sizeof(crypt_out[j]));
		/* Tmp = sha256(password || user_id) */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, saved_key[j], strlen(saved_key[j]));
		SHA256_Update(&ctx, cur_salt->user_id, strlen((char*)cur_salt->user_id));
		SHA256_Final(Tmp, &ctx);
		/* Tmp = P (binary) = sha256(Tmp) */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, Tmp, 32);
		SHA256_Final(Tmp, &ctx);
		/* Tmp = sha256(salt_hex || hex(P)) */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, cur_salt->saved_salt, strlen((char*)cur_salt->saved_salt));
		hex_encode(Tmp, 32, TmpHex);
		SHA256_Update(&ctx, TmpHex, 64);
		SHA256_Final(Tmp, &ctx);
		/* Tmp = x = sha256(Tmp) */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, Tmp, 32);
		SHA256_Final(Tmp, &ctx);
#ifdef HAVE_LIBGMP
		{
			/* GMP path: go through hex strings for both the exponent
			 * and the result, then unpack the (possibly odd-length)
			 * result hex into crypt_out[j] big-end first. */
			unsigned char HashStr[80], *p;
			int i, todo;
			p = HashStr;
			for (i = 0; i < 32; ++i) {
				*p++ = itoa16[Tmp[i]>>4];
				*p++ = itoa16[Tmp[i]&0xF];
			}
			*p = 0;
			mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);
			mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );
			mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);
			p = HashStr;
			todo = strlen((char*)p);
			if (todo&1) {
				/* odd digit count: lone leading nibble */
				((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];
				++p;
				--todo;
			} else {
				((unsigned char*)(crypt_out[j]))[0] =
					(atoi16[ARCH_INDEX(*p)] << 4) |
					atoi16[ARCH_INDEX(p[1])];
				p += 2;
				todo -= 2;
			}
			todo >>= 1;	/* remaining digit pairs */
			for (i = 1; i <= todo; i++) {
				((unsigned char*)(crypt_out[j]))[i] =
					(atoi16[ARCH_INDEX(*p)] << 4) |
					atoi16[ARCH_INDEX(p[1])];
				p += 2;
			}
		}
#else
		// using oSSL's BN to do expmod.
		/* BN_bn2bin emits the residue with no leading zero bytes, which
		 * matches the GMP layout above (buffer was pre-zeroed). */
		pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,32,pSRP_CTX[j].z_exp);
		BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);
		BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));
#endif
	}
	return count;
}
/* Quick screen: does any candidate's first 32-bit word match the binary? */
static int cmp_all(void *binary, int count)
{
	int i;
	for (i = 0; i < count; ++i) {
		if (*((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[i])))
			return 1;
	}
	return 0;
}
/* Same first-word screen for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return *((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[index]));
}
/* Definitive check: re-derive the full binary from the source hash and
 * compare all BINARY_SIZE (33) bytes. */
static int cmp_exact(char *source, int index)
{
	return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE);
}
/* Format descriptor wiring the Clipperz/SRP callbacks into the JtR core.
 * binary_hash_* use the framework defaults (low bits of the first word of
 * the binary), mirroring the get_hash_* implementations above. */
struct fmt_main fmt_clipperz = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a timeval with 0 <= tv_usec < 1000000.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place (same contract as the glibc manual's
 * elapsed-time example this is derived from). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y so x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry excess microseconds out of the difference. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* tv_usec is now certainly positive. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the two time planes and the seven coefficient arrays,
 * runs the order-1 3D 7-point variable-coefficient stencil TESTS times,
 * and reports the best wall-clock time.
 * Fixes vs. original: sizes get defaults instead of being read
 * uninitialized (UB) when arguments are missing; `min(...)` (undefined)
 * replaced with the MIN() macro defined above; tile_size and the
 * top-level A/coef pointers are now freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* Defaults (interior 32^3, 10 time steps) so the sizes are never used
   * uninitialized when fewer than 4 arguments are supplied. */
  Nx = 34; Ny = 34; Nz = 34;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;   /* +2: one halo layer on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Allocate the two time planes A[0..1][Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* One variable-coefficient array per stencil point (7-point stencil). */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 4;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Initialize grid and coefficients with reproducible pseudo-random data. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            /* Jacobi update: planes alternate between A[0] and A[1]. */
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* was `min(...)`, which is not defined in this file -- MIN() is */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (including top-level pointers and tile_size,
  // which previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
Mapping.h | //===--------- Mapping.h - OpenMP device runtime mapping helpers -- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_MAPPING_H
#define OMPTARGET_MAPPING_H
#include "Types.h"
namespace _OMP {
namespace mapping {
#pragma omp declare target
inline constexpr uint32_t MaxThreadsPerTeam = 1024;
#pragma omp end declare target
/// Initialize the mapping machinery.
void init(bool IsSPMD);
/// Return true if the kernel is executed in SPMD mode.
bool isSPMDMode();
/// Return true if the kernel is executed in generic mode.
bool isGenericMode();
/// Return true if the executing thread is the main thread in generic mode.
/// These functions will lookup state and it is required that that is OK for the
/// thread and location. See also `isInitialThreadInLevel0` for a stateless
/// alternative for certain situations, e.g. during initialization.
bool isMainThreadInGenericMode();
bool isMainThreadInGenericMode(bool IsSPMD);
/// Return true if this thread is the initial thread in parallel level 0.
///
/// The thread for which this returns true should be used for single threaded
/// initialization tasks. We pick a special thread to ensure there are no
/// races between the initialization and the first read of initialized state.
bool isInitialThreadInLevel0(bool IsSPMD);
/// Return true if the executing thread has the lowest Id of the active threads
/// in the warp.
bool isLeaderInWarp();
/// Return a mask describing all active threads in the warp.
LaneMaskTy activemask();
/// Return a mask describing all threads with a smaller Id in the warp.
LaneMaskTy lanemaskLT();
/// Return a mask describing all threads with a larger Id in the warp.
LaneMaskTy lanemaskGT();
/// Return the thread Id in the warp, in [0, getWarpSize()).
uint32_t getThreadIdInWarp();
/// Return the thread Id in the block, in [0, getBlockSize()).
uint32_t getThreadIdInBlock();
/// Return the warp id in the block.
uint32_t getWarpId();
/// Return the warp size, thus number of threads in the warp.
uint32_t getWarpSize();
/// Return the number of warps in the block.
uint32_t getNumberOfWarpsInBlock();
/// Return the block Id in the kernel, in [0, getKernelSize()).
uint32_t getBlockId();
/// Return the block size, thus number of threads in the block.
///
/// Note: The version taking \p IsSPMD mode explicitly can be used during the
/// initialization of the target region, that is before `mapping::isSPMDMode()`
/// can be called by any thread other than the main one.
uint32_t getBlockSize();
uint32_t getBlockSize(bool IsSPMD);
/// Return the number of blocks in the kernel.
uint32_t getNumberOfBlocks();
/// Return the kernel size, thus number of threads in the kernel.
uint32_t getKernelSize();
/// Return the number of processing elements on the device.
uint32_t getNumberOfProcessorElements();
} // namespace mapping
} // namespace _OMP
#endif
|
activation.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_ACTIVATION_H_
#define MACE_KERNELS_ACTIVATION_H_
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/core/types.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
enum ActivationType {
NOOP = 0,
RELU = 1,
RELUX = 2,
PRELU = 3,
TANH = 4,
SIGMOID = 5
};
// Map an activation name (e.g. from a model definition) to its enum value.
// Unknown names abort via LOG(FATAL); the trailing return only silences
// compiler warnings. Fix: the string is now taken by const reference
// instead of by value (avoids a copy per call; call sites are unchanged).
inline ActivationType StringToActivationType(const std::string &type) {
  if (type == "RELU") {
    return ActivationType::RELU;
  } else if (type == "RELUX") {
    return ActivationType::RELUX;
  } else if (type == "PRELU") {
    return ActivationType::PRELU;
  } else if (type == "TANH") {
    return ActivationType::TANH;
  } else if (type == "SIGMOID") {
    return ActivationType::SIGMOID;
  } else if (type == "NOOP") {
    return ActivationType::NOOP;
  } else {
    LOG(FATAL) << "Unknown activation type: " << type;
  }
  return ActivationType::NOOP;  // not reached
}
// Apply an element-wise activation to `size` elements of input_ptr, writing
// the results to output_ptr. PRELU is deliberately absent here (it needs
// per-channel alphas; see PReLUActivation) and hits the fatal default.
// Half precision is rejected up front via MACE_CHECK.
template <typename T>
void DoActivation(const T *input_ptr,
                  T *output_ptr,
                  const index_t size,
                  const ActivationType type,
                  const float relux_max_limit) {
  MACE_CHECK(DataTypeToEnum<T>::value != DataType::DT_HALF);
  switch (type) {
    case NOOP:
      break;
    case RELU:  // out = max(x, 0)
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = std::max(input_ptr[i], static_cast<T>(0));
      }
      break;
    case RELUX:  // out = clamp(x, 0, relux_max_limit)
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = std::min(std::max(input_ptr[i], static_cast<T>(0)),
                                 static_cast<T>(relux_max_limit));
      }
      break;
    case TANH:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = std::tanh(input_ptr[i]);
      }
      break;
    case SIGMOID:  // out = 1 / (1 + e^-x)
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output_ptr[i] = 1 / (1 + std::exp(-input_ptr[i]));
      }
      break;
    default:
      LOG(FATAL) << "Unknown activation type: " << type;
  }
}
// PReLU: out = x for x >= 0, alpha[channel] * x otherwise, over a buffer
// laid out as (outer, channel, inner) flattened to 1-D -- i.e. dim(1) is
// treated as the channel axis. The three loops are kept perfectly nested
// for the collapse(3) clause.
template <typename T>
void PReLUActivation(const T *input_ptr,
                     const index_t outer_size,
                     const index_t input_chan,
                     const index_t inner_size,
                     const T *alpha_ptr,
                     T *output_ptr) {
#pragma omp parallel for collapse(3)
  for (index_t i = 0; i < outer_size; ++i) {
    for (index_t chan_idx = 0; chan_idx < input_chan; ++chan_idx) {
      for (index_t j = 0; j < inner_size; ++j) {
        index_t idx = i * input_chan * inner_size + chan_idx * inner_size + j;
        if (input_ptr[idx] < 0) {
          output_ptr[idx] = input_ptr[idx] * alpha_ptr[chan_idx];
        } else {
          output_ptr[idx] = input_ptr[idx];
        }
      }
    }
  }
}
template <DeviceType D, typename T>
class ActivationFunctor;
// CPU float specialization: dispatches PRELU to PReLUActivation (which
// needs the per-channel alpha tensor) and all other activations to
// DoActivation. Runs synchronously; `future` is unused.
template <>
class ActivationFunctor<DeviceType::CPU, float> {
 public:
  ActivationFunctor(ActivationType type, float relux_max_limit)
      : activation_(type), relux_max_limit_(relux_max_limit) {}
  MaceStatus operator()(const Tensor *input,
                        const Tensor *alpha,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const float *input_ptr = input->data<float>();
    float *output_ptr = output->mutable_data<float>();
    if (activation_ == PRELU) {
      MACE_CHECK_NOTNULL(alpha);
      const float *alpha_ptr = alpha->data<float>();
      // dim(1) is the channel axis; dim(2)*dim(3) is the per-channel extent
      const index_t outer_size = output->dim(0);
      const index_t inner_size = output->dim(2) * output->dim(3);
      PReLUActivation(input_ptr, outer_size, input->dim(1), inner_size,
                      alpha_ptr, output_ptr);
    } else {
      DoActivation(input_ptr, output_ptr, output->size(), activation_,
                   relux_max_limit_);
    }
    return MACE_SUCCESS;
  }
 private:
  ActivationType activation_;   // which activation to apply
  float relux_max_limit_;       // upper clamp, used by RELUX only
};
#ifdef MACE_ENABLE_OPENCL
// GPU specialization: operator() is implemented in the OpenCL backend
// (declaration only here). The members cache the compiled kernel and
// tuning state between invocations.
template <typename T>
class ActivationFunctor<DeviceType::GPU, T> {
 public:
  ActivationFunctor(ActivationType type, T relux_max_limit)
      : activation_(type), relux_max_limit_(static_cast<T>(relux_max_limit)) {}
  MaceStatus operator()(const Tensor *input,
                        const Tensor *alpha,
                        Tensor *output,
                        StatsFuture *future);
 private:
  ActivationType activation_;
  T relux_max_limit_;
  cl::Kernel kernel_;        // cached compiled OpenCL kernel
  uint32_t kwg_size_;        // kernel workgroup size
  std::unique_ptr<BufferBase> kernel_error_;
  std::string tuning_key_prefix_;
  // last-seen input shape -- presumably used to detect when the kernel
  // must be rebuilt; confirm in the OpenCL implementation
  std::vector<index_t> input_shape_;
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_ACTIVATION_H_
|
parallel_sort.c | /*
parallel_sort.c: A parallel version of sort, not stable
*/
#include<stdlib.h>
#include<string.h>
#include<stdbool.h>
#include <parallel_sort.h>
/* Merge two sorted runs (src1 of nmemb1 elements, src2 of nmemb2, each
 * element `size` bytes) into dest, using compar for ordering. Not stable
 * across the two runs' fast paths. dest must not overlap the sources.
 * Fixes vs. original: arithmetic was performed on void* (a GCC extension,
 * invalid ISO C) -- now done on unsigned char*; empty runs no longer read
 * one element before the start of an array in the fast-path probes;
 * deprecated `register` qualifiers dropped. */
void serial_merge(void *src1, void *src2,
		void *dest,
		size_t nmemb1, size_t nmemb2, size_t size,
		int (*compar ) (const void *, const void * )){
	unsigned char *out = dest;
	unsigned char *left = src1;
	unsigned char *left_end = left + size * nmemb1;
	unsigned char *right = src2;
	unsigned char *right_end = right + size * nmemb2;

	/* Degenerate runs: nothing to interleave. */
	if (nmemb1 == 0) {
		memcpy(out, right, size * nmemb2);
		return;
	}
	if (nmemb2 == 0) {
		memcpy(out, left, size * nmemb1);
		return;
	}
	/* Fast paths: the two runs do not interleave at all. */
	if (compar(left_end - size, right) < 0) {
		memcpy(out, left, left_end - left);
		out += left_end - left;
		memcpy(out, right, right_end - right);
		return;
	}
	if (compar(right_end - size, left) < 0) {
		memcpy(out, right, right_end - right);
		out += right_end - right;
		memcpy(out, left, left_end - left);
		return;
	}
	/* General case: element-by-element merge. */
	while (left < left_end && right < right_end) {
		if (compar(right, left) < 0) {
			memcpy(out, right, size);
			right += size;
		} else {
			memcpy(out, left, size);
			left += size;
		}
		out += size;
	}
	/* Copy whichever run has elements remaining. */
	if (left < left_end)
		memcpy(out, left, left_end - left);
	else if (right < right_end)
		memcpy(out, right, right_end - right);
}
/*
 * Return the index of the first position in the sorted array base[0..nmemb)
 * where *val could be inserted while keeping the array sorted
 * (0 if *val precedes everything, nmemb if it follows everything).
 * NOTE(review): assumes nmemb >= 1 — the boundary probes read base[nmemb-1].
 *
 * FIX: replaced GNU-extension void* arithmetic with char*; dropped the
 * obsolete `register` qualifiers (modern compilers ignore them).
 */
size_t binary_search(void *base, void *val, size_t nmemb, size_t size,
                     int (*compar)(const void *, const void *)) {
    const char *b = base;

    /* Special cases: val precedes / follows the whole array. */
    if (compar(val, b) < 0)
        return 0;
    else if (compar(val, b + size * (nmemb - 1)) > 0)
        return nmemb;

    size_t low = 0;
    size_t high = nmemb - 1;
    /* Invariant: the answer lies in [low, high]. The loop also stops early
       when base[low] and base[high] compare equal, which is sufficient for
       the (unstable) merge in this file. */
    while (compar(b + low * size, b + high * size) < 0) {
        size_t mid = low + (high - low) / 2;  /* overflow-safe midpoint */
        if (compar(val, b + mid * size) <= 0)
            high = mid;
        else
            low = mid + 1;
    }
    return low;
}
/*
 * Parallel merge of two sorted runs into dest, using OpenMP tasks.
 * Strategy: pick the median of the larger run, binary-search its position
 * in the smaller run, place it, and recursively merge the two halves in
 * parallel. Small inputs fall back to serial_merge.
 *
 * FIX: replaced GNU-extension void* arithmetic with char*.
 */
void parallel_merge(void *src1, void *src2,
                    void *dest,
                    size_t nmemb1, size_t nmemb2, size_t size,
                    int (*compar)(const void *, const void *)) {
    /* Below the cutoff, task overhead dominates — merge serially. */
    if (nmemb1 + nmemb2 < PARALLEL_SERIAL_MERGE_SWITCH) {
        serial_merge(src1, src2, dest, nmemb1, nmemb2, size, compar);
        return;
    }
    /* Ensure src1 is the larger run so its median splits the work evenly. */
    if (nmemb1 < nmemb2) {
        parallel_merge(src2, src1, dest, nmemb2, nmemb1, size, compar);
        return;
    }

    char *a = src1;
    char *b = src2;
    char *d = dest;
    size_t mid = nmemb1 / 2;
    /* Where does a's median element land within b? */
    size_t split = binary_search(b, a + size * mid, nmemb2, size, compar);
    /* The median's final position is fully determined: place it now. */
    memcpy(d + size * (mid + split), a + size * mid, size);
#pragma omp task
    {
        parallel_merge(a, b, d, mid, split, size, compar);
    }
#pragma omp task
    {
        parallel_merge(a + size * (mid + 1), b + size * split,
                       d + size * (mid + 1 + split),
                       nmemb1 - mid - 1, nmemb2 - split, size, compar);
    }
#pragma omp taskwait
}
/*
 * Recursive parallel merge sort over ping-pong buffers.
 * When src2dest is true the sorted result ends up in dest, otherwise in src;
 * the flag flips at each level so each merge reads one buffer and writes the
 * other. Small inputs are sorted with qsort and mirrored into both buffers.
 *
 * FIX: replaced GNU-extension void* arithmetic with char*.
 */
void parallel_mergesort(void *src, void *dest,
                        size_t nmemb, size_t size,
                        int (*compar)(const void *, const void *),
                        bool src2dest) {
    if (nmemb < PARALLEL_SERIAL_SORT_SWITCH) {
        /* Sort in whichever buffer the result belongs, then mirror it so the
           sibling recursion level finds sorted data in the other buffer. */
        if (src2dest) {
            qsort(src, nmemb, size, compar);
            memcpy(dest, src, nmemb * size);
        }
        else {
            qsort(dest, nmemb, size, compar);
            memcpy(src, dest, nmemb * size);
        }
        return;
    }

    char *s = src;
    char *d = dest;
    size_t half = nmemb / 2;
#pragma omp task
    {
        parallel_mergesort(s, d, half, size, compar, !src2dest);
    }
#pragma omp task
    {
        parallel_mergesort(s + size * half, d + size * half,
                           nmemb - half, size, compar, !src2dest);
    }
#pragma omp taskwait
    /* Halves are now sorted in the opposite buffer; merge into this one. */
    if (src2dest)
        parallel_merge(s, s + size * half, d, half, nmemb - half,
                       size, compar);
    else
        parallel_merge(d, d + size * half, s, half, nmemb - half,
                       size, compar);
}
/*
 * Public entry point: sort base[0..nmemb) in place (not stable).
 * Allocates an auxiliary buffer for the ping-pong merge sort.
 *
 * FIX: the original used the malloc() result unchecked and would have
 * crashed on allocation failure; we now degrade gracefully to qsort.
 */
void parallel_sort(void *base, size_t nmemb, size_t size,
                   int (*compar)(const void *, const void *)) {
    void *aux = malloc(nmemb * size);
    if (aux == NULL) {
        /* Out of memory: still deliver a sorted array, just serially. */
        qsort(base, nmemb, size, compar);
        return;
    }
    memcpy(aux, base, nmemb * size);
#pragma omp parallel num_threads(PARALLEL_SORT_NUM_THREADS)
#pragma omp single
    parallel_mergesort(aux, base, nmemb, size, compar, true);
    free(aux);
}
|
mxv_omp_mpi.c | /**
* Program to multiply a matrix times a vector using both
* MPI to distribute the computation among nodes and OMP
* to distribute the computation among threads.
*/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "mat.h"
#define min(x, y) ((x)<(y)?(x):(y))
/*
 * Master/worker matrix-vector multiply: rank 0 hands out rows one at a time
 * (tag = row index + 1); workers return the dot product with vector b.
 * A tag of 0 tells a worker to stop. OpenMP parallelizes each worker's
 * per-row dot product.
 *
 * FIXES vs. original:
 *  - "#pragma omp shared(ans) for reduction(+:ans)" is not valid OpenMP
 *    syntax; replaced with a single combined "parallel for reduction".
 *  - b was broadcast uninitialized (result was garbage/UB). It is now filled
 *    with 1.0 on the master. NOTE(review): confirm the intended source of b
 *    (a gen_vector() helper may exist alongside gen_matrix()).
 *  - freed b/c/buffer/aa; removed unused locals (times, nruns, ...).
 */
int main(int argc, char* argv[])
{
    int nrows, ncols;
    double *aa = NULL, *b, *c;
    double *buffer, ans;
    int myid, master, numprocs;
    double starttime, endtime;
    MPI_Status status;
    int i, j, numsent, sender;
    int anstype, row;

    srand(time(0));  /* seeds rand() presumably used inside gen_matrix() */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    if (argc > 1) {
        nrows = atoi(argv[1]);
        ncols = nrows;
        b = (double*)malloc(sizeof(double) * ncols);
        c = (double*)malloc(sizeof(double) * nrows);
        buffer = (double*)malloc(sizeof(double) * ncols);
        master = 0;
        if (myid == master) {
            /* ---- master ---- */
            aa = gen_matrix(nrows, ncols);
            for (j = 0; j < ncols; j++) {
                b[j] = 1.0;  /* FIX: b was previously broadcast uninitialized */
            }
            starttime = MPI_Wtime();
            numsent = 0;
            MPI_Bcast(b, ncols, MPI_DOUBLE, master, MPI_COMM_WORLD);
            /* Prime each worker with one row (tag encodes row index + 1). */
            for (i = 0; i < min(numprocs-1, nrows); i++) {
                for (j = 0; j < ncols; j++) {
                    buffer[j] = aa[i * ncols + j];
                }
                MPI_Send(buffer, ncols, MPI_DOUBLE, i+1, i+1, MPI_COMM_WORLD);
                numsent++;
            }
            /* Collect one answer per row; refill idle workers until done. */
            for (i = 0; i < nrows; i++) {
                MPI_Recv(&ans, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
                         MPI_COMM_WORLD, &status);
                sender = status.MPI_SOURCE;
                anstype = status.MPI_TAG;      /* row index + 1 */
                c[anstype-1] = ans;
                if (numsent < nrows) {
                    for (j = 0; j < ncols; j++) {
                        buffer[j] = aa[numsent*ncols + j];
                    }
                    MPI_Send(buffer, ncols, MPI_DOUBLE, sender, numsent+1,
                             MPI_COMM_WORLD);
                    numsent++;
                } else {
                    /* tag 0 = shutdown signal */
                    MPI_Send(MPI_BOTTOM, 0, MPI_DOUBLE, sender, 0, MPI_COMM_WORLD);
                }
            }
            endtime = MPI_Wtime();
            printf("%f\n",(endtime - starttime));
        } else {
            /* ---- worker ---- */
            MPI_Bcast(b, ncols, MPI_DOUBLE, master, MPI_COMM_WORLD);
            if (myid <= nrows) {
                while (1) {
                    MPI_Recv(buffer, ncols, MPI_DOUBLE, master, MPI_ANY_TAG,
                             MPI_COMM_WORLD, &status);
                    if (status.MPI_TAG == 0) {
                        break;
                    }
                    row = status.MPI_TAG;
                    ans = 0.0;
                    /* FIX: was the invalid "#pragma omp shared(ans) for ..." */
                    #pragma omp parallel for reduction(+:ans)
                    for (j = 0; j < ncols; j++) {
                        ans += buffer[j] * b[j];
                    }
                    MPI_Send(&ans, 1, MPI_DOUBLE, master, row, MPI_COMM_WORLD);
                }
            }
        }
        free(buffer);
        free(c);
        free(b);
        free(aa);  /* NULL on workers; free(NULL) is a no-op */
    } else {
        fprintf(stderr, "Usage matrix_times_vector <size>\n");
    }
    MPI_Finalize();
    return 0;
}
|
parallel.c | //Potential Solution
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include "utils.h"
/*
 * OpenMP-parallelized workload: a dot product, a Riemann-sum pi, a
 * Monte-Carlo pi, an aggregate over `old`, and a 3x3x3 box blur of `old`
 * into `new` with a histogram of the blurred values (bucketed by /100).
 * DIM, NUM_STEPS, NUM_TRIALS and histogrammy come from utils.h.
 *
 * FIXES vs. original:
 *  - reduction list ended with a trailing comma ("...,u9,)") — a syntax error.
 *  - `sum` was reduced before ever being initialized (UB); now starts at 0.
 *  - `aggregate = 1.0` assigned a double literal to a long; now `1`.
 *  - removed unused locals (ton, was_smart).
 */
double work_it_par(long* old, long* new, long* super, long* simple, long* fibonacci) {
    int i, j, k;
    int u;
    long compute_it, moving_average;
    double pi, pi2, x, y;
    double sum = 0.0;           /* FIX: was uninitialized before reduction */
    double step = 1.0 / NUM_STEPS;
    long dot_product = 0;
    long nCirc = 0;
    long aggregate = 1;         /* FIX: was "= 1.0" on a long */
    double r = 1.0;
    /* Scalar histogram counters: OpenMP reductions over scalars are portable
       where array-section reductions may not be. */
    int u0 = 0, u1 = 0, u2 = 0, u3 = 0, u4 = 0;
    int u5 = 0, u6 = 0, u7 = 0, u8 = 0, u9 = 0;
    moving_average = 0;

    for (i = 0; i < DIM - 1; i++)
    {
        super[i] += simple[i];
    }
    #pragma omp parallel for private(i) reduction(+:dot_product)
    for (i = 0; i < DIM - 1; i++)
    {
        dot_product += super[i] * simple[i];
    }
    printf("\n A secret is: %d", obfuscate_obfuscate_obfuscate(5));

    /* Serial: each term depends on the previous two. */
    fibonacci[0] = 1;
    fibonacci[1] = 1;
    for (i = 2; i < DIM - 1; i++)
    {
        fibonacci[i] = fibonacci[i - 1] + fibonacci[i - 2];
    }

    /* FIX: removed the trailing comma that made this pragma invalid. */
    #pragma omp parallel private(i,j,k,u,x,y,compute_it) reduction(+:dot_product,sum,aggregate,nCirc,u0,u1,u2,u3,u4,u5,u6,u7,u8,u9)
    {
        /* Riemann sum for pi = integral of 4/(1+x^2) over [0,1]. */
        #pragma omp for
        for (i = 0; i < NUM_STEPS; i++)
        {
            x = (i + 0.5) * step;
            sum = sum + 4.0 / (1.0 + x * x);
        }
        /* Monte-Carlo pi. NOTE(review): random() keeps shared state and is
           not thread-safe; kept for behavioral parity with the original. */
        #pragma omp for
        for (i = 0; i < NUM_TRIALS; i++)
        {
            x = (random() % 10000000) / 10000000.0;
            y = (random() % 10000000) / 10000000.0;
            if ((x * x + y * y) <= r * r) {
                nCirc++;
            }
        }
        const int DSQ = DIM * DIM;  /* stride of one i-plane */
        #pragma omp for
        for (i = 1; i < DIM - 1; i++) {
            for (j = 1; j < DIM - 1; j++) {
                for (k = 1; k < DIM - 1; k++) {
                    compute_it = old[i * DSQ + j * DIM + k] * we_need_the_func();
                    aggregate += compute_it / gimmie_the_func();
                }
            }
        }
        /* 3x3x3 mean filter (manually unrolled) + histogram bucketing. */
        #pragma omp for
        for (i = 1; i < DIM - 1; i++)
        {
            int iDSQ = i * DSQ;
            int iDM = (i - 1) * DSQ;
            int iDA = (i + 1) * DSQ;
            for (j = 1; j < DIM - 1; j++) {
                for (k = 1; k < DIM - 1; k++) {
                    /* NOTE(review): int accumulator kept from the original —
                       could overflow for large element values; confirm. */
                    int temp_unroll = 0;
                    temp_unroll += old[iDM + (j - 1) * DIM + (k - 1)];
                    temp_unroll += old[iDM + (j - 1) * DIM + k];
                    temp_unroll += old[iDM + (j - 1) * DIM + (k + 1)];
                    temp_unroll += old[iDM + j * DIM + (k - 1)];
                    temp_unroll += old[iDM + j * DIM + k];
                    temp_unroll += old[iDM + j * DIM + (k + 1)];
                    temp_unroll += old[iDM + (j + 1) * DIM + (k - 1)];
                    temp_unroll += old[iDM + (j + 1) * DIM + k];
                    temp_unroll += old[iDM + (j + 1) * DIM + (k + 1)];
                    temp_unroll += old[iDSQ + (j - 1) * DIM + (k - 1)];
                    temp_unroll += old[iDSQ + (j - 1) * DIM + k];
                    temp_unroll += old[iDSQ + (j - 1) * DIM + (k + 1)];
                    temp_unroll += old[iDSQ + j * DIM + (k - 1)];
                    temp_unroll += old[iDSQ + j * DIM + k];
                    temp_unroll += old[iDSQ + j * DIM + (k + 1)];
                    temp_unroll += old[iDSQ + (j + 1) * DIM + (k - 1)];
                    temp_unroll += old[iDSQ + (j + 1) * DIM + k];
                    temp_unroll += old[iDSQ + (j + 1) * DIM + (k + 1)];
                    temp_unroll += old[iDA + (j - 1) * DIM + (k - 1)];
                    temp_unroll += old[iDA + (j - 1) * DIM + k];
                    temp_unroll += old[iDA + (j - 1) * DIM + (k + 1)];
                    temp_unroll += old[iDA + j * DIM + (k - 1)];
                    temp_unroll += old[iDA + j * DIM + k];
                    temp_unroll += old[iDA + j * DIM + (k + 1)];
                    temp_unroll += old[iDA + (j + 1) * DIM + (k - 1)];
                    temp_unroll += old[iDA + (j + 1) * DIM + k];
                    temp_unroll += old[iDA + (j + 1) * DIM + (k + 1)];
                    temp_unroll /= 27;
                    new[iDSQ + j * DIM + k] = temp_unroll;
                    u = (temp_unroll / 100);
                    if (u <= 0) u0++;
                    if (u >= 9) u9++;
                    if (u == 1) u1++;
                    if (u == 2) u2++;
                    if (u == 3) u3++;
                    if (u == 4) u4++;
                    if (u == 5) u5++;
                    if (u == 6) u6++;
                    if (u == 7) u7++;
                    if (u == 8) u8++;
                }
            }
        }
    }
    /* Publish the reduced per-bucket counts. */
    histogrammy[0] = u0;
    histogrammy[9] = u9;
    histogrammy[1] = u1;
    histogrammy[2] = u2;
    histogrammy[3] = u3;
    histogrammy[4] = u4;
    histogrammy[5] = u5;
    histogrammy[6] = u6;
    histogrammy[7] = u7;
    histogrammy[8] = u8;
    pi = step * sum;
    printf("\n %d trials, Riemann flavored pi is %f \n", NUM_STEPS, pi);
    pi2 = 4.0 * ((double)nCirc / (double)NUM_TRIALS);
    printf("\n %d trials, Monte-Carlo flavored pi is %f \n", NUM_TRIALS, pi2);
    printf("AGGR:%ld\n", aggregate);
    return (double)(dot_product + moving_average + pi + pi2);
}
|
DRB070-simd1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
One-dimensional array computation with a vectorization directive
*/
/* FIX: printf was used with no #include anywhere in this file — an implicit
   function declaration, invalid since C99. */
#include <stdio.h>

int a[100], b[100], c[100];

/* Initializes a, b, c in parallel, then recomputes a[i] = b[i]*c[i] in a
   second parallel loop, and prints all three arrays. Each iteration touches
   only index i, so both loops are race-free. */
int main()
{
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < 100; i++) {
        a[i] = i * 40;
        b[i] = i - 1;
        c[i] = i;
    }
    #pragma omp parallel for private(i)
    for (i = 0; i < 100; i++)
        a[i] = b[i] * c[i];
    for (i = 0; i < 100; i++) {
        printf("%d %d %d\n", a[i], b[i], c[i]);
    }
    return 0;
}
|
q1.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>
//static long int num_steps = 10000000;
/* Integrand for the quadrature of pi: f(x) = 4 / (1 + x^2). */
double f(double x) {
    return 4.0 / (1.0 + x * x);
}
/*
 * Computes pi by the midpoint rule with 10^3 .. 10^8 steps, timing each run.
 *
 * FIX: "#pragma omp serial" is not an OpenMP directive, so the region was
 * silently ignored and ran serially. It is now a real parallel region with a
 * worksharing "for", so the iterations are split across threads and the
 * per-thread partial sums combine (in the critical section) to the correct
 * total — simply writing "parallel" alone would have made every thread
 * compute the full sum and inflated pi by the thread count.
 */
int main() {
    int j;
    for (j = 3; j <= 8; j++) {
        long int num_steps = pow(10, j);
        double dx = 1.0 / (double)num_steps;
        double sum = 0;
        double net_start = omp_get_wtime();
        #pragma omp parallel
        {
            int id = omp_get_thread_num();      /* kept for instrumentation */
            double thread_start = omp_get_wtime();
            int i;
            double partial = 0;
            /* Split iterations across the team. */
            #pragma omp for
            for (i = 0; i < num_steps; i++) {
                double x = (i + 0.5) * dx;
                partial += f(x);
            }
            partial *= dx;
            double thread_elapsed = -1 * (thread_start - omp_get_wtime());
            (void)id; (void)thread_elapsed;     /* currently unreported */
            #pragma omp critical
            sum += partial;
        }
        double net_elapsed = omp_get_wtime() - net_start;
        double pi = sum;
        printf("pi %0.9lf net_elapsed %lf\n", pi, net_elapsed);
    }
    return 0;
}
|
GB_split_sparse_template.c | //------------------------------------------------------------------------------
// GB_split_sparse_template: split a single tile from a sparse matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get A and C, and the slicing of C
//--------------------------------------------------------------------------
// Template body: GB_CTYPE, GB_COPY, GB_ISO_SPLIT and all locals (Cp, Ci, Ai,
// Wp, kfirst_Cslice, ...) are provided by the including compilation unit.
#ifndef GB_ISO_SPLIT
// In the iso case no values are copied, so Ax/Cx are not needed.
const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ;
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
#endif
//--------------------------------------------------------------------------
// copy the tile from A to C
//--------------------------------------------------------------------------
// One parallel task per slice of C; each task handles vectors kfirst..klast.
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < C_ntasks ; tid++)
{
int64_t kfirst = kfirst_Cslice [tid] ;
int64_t klast = klast_Cslice [tid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// int64_t jA = GBH (Ah, k+akstart) ; not needed
// Entry range of vector k of C owned by this task.
int64_t pC_start, pC_end ;
GB_get_pA (&pC_start, &pC_end, tid, k,
kfirst, klast, pstart_Cslice, Cp, cvlen) ;
int64_t p0 = Cp [k] ;
// Wp maps C's vector k back to its starting entry inside A.
int64_t pA_offset = Wp [k + akstart] ;
// copy the vector from A to C
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
// get the index of A(iA,jA)
int64_t pA = pA_offset + pC - p0 ;
int64_t iA = Ai [pA] ;
// shift the index and copy into C(i,j)
// aistart is the row offset of the tile within A.
Ci [pC] = iA - aistart ;
GB_COPY (pC, pA) ;
}
}
}
done = true ;
}
#undef GB_CTYPE
#undef GB_ISO_SPLIT
|
core_dtsmqr.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztsmqr.c, normal z -> d, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
/***************************************************************************//**
*
* @ingroup core_tsmqr
*
* Overwrites the general m1-by-n1 tile A1 and
* m2-by-n2 tile A2 with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * | A1 | | A1 A2 | * Q
* | A2 |
*
* trans = PlasmaTrans Q^T * | A1 | | A1 A2 | * Q^T
* | A2 |
*
* where Q is a complex orthogonal matrix defined as the product of k
* elementary reflectors
*
* Q = H(1) H(2) . . . H(k)
*
* as returned by plasma_core_dtsqrt.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft : apply Q or Q^T from the Left;
* - PlasmaRight : apply Q or Q^T from the Right.
*
* @param[in] trans
* - PlasmaNoTrans : Apply Q;
* - PlasmaTrans : Apply Q^T.
*
* @param[in] m1
* The number of rows of the tile A1. m1 >= 0.
*
* @param[in] n1
* The number of columns of the tile A1. n1 >= 0.
*
* @param[in] m2
* The number of rows of the tile A2. m2 >= 0.
* m2 = m1 if side == PlasmaRight.
*
* @param[in] n2
* The number of columns of the tile A2. n2 >= 0.
* n2 = n1 if side == PlasmaLeft.
*
* @param[in] k
* The number of elementary reflectors whose product defines
* the matrix Q.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m1-by-n1 tile A1.
* On exit, A1 is overwritten by the application of Q.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m1).
*
* @param[in,out] A2
* On entry, the m2-by-n2 tile A2.
* On exit, A2 is overwritten by the application of Q.
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m2).
*
* @param[in] V
* The i-th row must contain the vector which defines the
* elementary reflector H(i), for i = 1,2,...,k, as returned by
* plasma_core_DTSQRT in the first k columns of its array argument V.
*
* @param[in] ldv
* The leading dimension of the array V. ldv >= max(1,k).
*
* @param[in] T
* The ib-by-k triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param work
* Auxiliary workspace array of length
* ldwork-by-n1 if side == PlasmaLeft
* ldwork-by-ib if side == PlasmaRight
*
* @param[in] ldwork
* The leading dimension of the array work.
* ldwork >= max(1,ib) if side == PlasmaLeft
* ldwork >= max(1,m1) if side == PlasmaRight
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
// Applies Q (or Q^T) from a TS QR factorization to the stacked tiles [A1; A2].
// Full contract is in the Doxygen block above; argument checks below return
// -i for the i-th illegal argument, in declaration order.
__attribute__((weak))
int plasma_core_dtsmqr(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
double *A1, int lda1,
double *A2, int lda2,
const double *V, int ldv,
const double *T, int ldt,
double *work, int ldwork)
{
// Check input arguments.
if (side != PlasmaLeft && side != PlasmaRight) {
plasma_coreblas_error("illegal value of side");
return -1;
}
if (trans != PlasmaNoTrans && trans != PlasmaTrans) {
plasma_coreblas_error("illegal value of trans");
return -2;
}
if (m1 < 0) {
plasma_coreblas_error("illegal value of m1");
return -3;
}
if (n1 < 0) {
plasma_coreblas_error("illegal value of n1");
return -4;
}
if (m2 < 0 || (m2 != m1 && side == PlasmaRight)) {
plasma_coreblas_error("illegal value of m2");
return -5;
}
if (n2 < 0 || (n2 != n1 && side == PlasmaLeft)) {
plasma_coreblas_error("illegal value of n2");
return -6;
}
if (k < 0 ||
(side == PlasmaLeft && k > m1) ||
(side == PlasmaRight && k > n1)) {
plasma_coreblas_error("illegal value of k");
return -7;
}
if (ib < 0) {
plasma_coreblas_error("illegal value of ib");
return -8;
}
if (A1 == NULL) {
plasma_coreblas_error("NULL A1");
return -9;
}
if (lda1 < imax(1, m1)) {
plasma_coreblas_error("illegal value of lda1");
return -10;
}
if (A2 == NULL) {
plasma_coreblas_error("NULL A2");
return -11;
}
if (lda2 < imax(1, m2)) {
plasma_coreblas_error("illegal value of lda2");
return -12;
}
if (V == NULL) {
plasma_coreblas_error("NULL V");
return -13;
}
if (ldv < imax(1, side == PlasmaLeft ? m2 : n2)) {
plasma_coreblas_error("illegal value of ldv");
return -14;
}
if (T == NULL) {
plasma_coreblas_error("NULL T");
return -15;
}
if (ldt < imax(1, ib)) {
plasma_coreblas_error("illegal value of ldt");
return -16;
}
if (work == NULL) {
plasma_coreblas_error("NULL work");
return -17;
}
if (ldwork < imax(1, side == PlasmaLeft ? ib : m1)) {
plasma_coreblas_error("illegal value of ldwork");
return -18;
}
// quick return
if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
return PlasmaSuccess;
// Choose the sweep direction over the ib-wide reflector panels:
// forward (i = 0, ib, 2*ib, ...) or backward (i = ..., ib, 0), depending
// on which side/trans combination the blocked representation requires.
int i1, i3;
if ((side == PlasmaLeft && trans != PlasmaNoTrans) ||
(side == PlasmaRight && trans == PlasmaNoTrans)) {
i1 = 0;
i3 = ib;
}
else {
i1 = ((k-1)/ib)*ib;
i3 = -ib;
}
// Loop condition covers both directions: stop when i leaves [0, k).
for (int i = i1; i > -1 && i < k; i += i3) {
int kb = imin(ib, k-i);   // width of this panel (last one may be short)
int ic = 0;
int jc = 0;
int mi = m1;
int ni = n1;
if (side == PlasmaLeft) {
// H or H^T is applied to C(i:m,1:n).
mi = m1 - i;
ic = i;
}
else {
// H or H^T is applied to C(1:m,i:n).
ni = n1 - i;
jc = i;
}
// Apply H or H^T (NOTE: plasma_core_dparfb used to be core_ztsrfb).
plasma_core_dparfb(side, trans, PlasmaForward, PlasmaColumnwise,
mi, ni, m2, n2, kb, 0,
&A1[lda1*jc+ic], lda1,
A2, lda2,
&V[ldv*i], ldv,
&T[ldt*i], ldt,
work, ldwork);
}
return PlasmaSuccess;
}
/******************************************************************************/
void plasma_core_omp_dtsmqr(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
double *A1, int lda1,
double *A2, int lda2,
const double *V, int ldv,
const double *T, int ldt,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(inout:A1[0:lda1*n1]) \
depend(inout:A2[0:lda2*n2]) \
depend(in:V[0:ldv*k]) \
depend(in:T[0:ib*k])
{
if (sequence->status == PlasmaSuccess) {
// Prepare workspaces.
int tid = omp_get_thread_num();
double *W = (double*)work.spaces[tid];
int ldwork = side == PlasmaLeft ? ib : m1; // TODO: double check
// Call the kernel.
int info = plasma_core_dtsmqr(side, trans,
m1, n1, m2, n2, k, ib,
A1, lda1,
A2, lda2,
V, ldv,
T, ldt,
W, ldwork);
if (info != PlasmaSuccess) {
plasma_error("core_dtsmqr() failed");
plasma_request_fail(sequence, request, PlasmaErrorInternal);
}
}
}
}
|
16_primes-par2.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
int main(int argc, char **argv) {
// quantos numeros primos entre 1 e N ?
unsigned long n = 99999;
unsigned long aux = 2;
unsigned long primes = 0;
//Escalonamento dinamico: cada thread executa uma iteração (fcfs)
#pragma omp parallel for firstprivate(aux) reduction(+:primes) schedule(dynamic)
for (unsigned long i = 2; i < n; i++) {
while (aux < i) {
if (i % aux == 0) break;
aux++;
}
if (aux == i) primes++;
aux = 2;
}
printf("%lu primos entre 1 e %lu\n",primes,n);
return 0;
}
|
core_ctsmlq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztsmlq.c, normal z -> c, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
/***************************************************************************//**
*
* @ingroup core_tsmlq
*
* Overwrites the general complex m1-by-n1 tile A1 and
* m2-by-n2 tile A2 with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * | A1 | | A1 A2 | * Q
* | A2 |
*
* trans = Plasma_ConjTrans Q^H * | A1 | | A1 A2 | * Q^H
* | A2 |
*
* where Q is a complex unitary matrix defined as the product of k
* elementary reflectors
*
* Q = H(k)^H . . . H(2)^H H(1)^H
*
* as returned by plasma_core_ctslqt.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft : apply Q or Q^H from the Left;
* - PlasmaRight : apply Q or Q^H from the Right.
*
* @param[in] trans
* - PlasmaNoTrans : Apply Q;
* - Plasma_ConjTrans : Apply Q^H.
*
* @param[in] m1
* The number of rows of the tile A1. m1 >= 0.
*
* @param[in] n1
* The number of columns of the tile A1. n1 >= 0.
*
* @param[in] m2
* The number of rows of the tile A2. m2 >= 0.
* m2 = m1 if side == PlasmaRight.
*
* @param[in] n2
* The number of columns of the tile A2. n2 >= 0.
* n2 = n1 if side == PlasmaLeft.
*
* @param[in] k
* The number of elementary reflectors whose product defines
* the matrix Q.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m1-by-n1 tile A1.
* On exit, A1 is overwritten by the application of Q.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m1).
*
* @param[in,out] A2
* On entry, the m2-by-n2 tile A2.
* On exit, A2 is overwritten by the application of Q.
*
* @param[in] lda2
* The leading dimension of the tile A2. lda2 >= max(1,m2).
*
* @param[in] V
* The i-th row must contain the vector which defines the
* elementary reflector H(i), for i = 1,2,...,k, as returned by
* plasma_core_ctslqt in the first k rows of its array argument V.
*
* @param[in] ldv
* The leading dimension of the array V. ldv >= max(1,k).
*
* @param[in] T
* The ib-by-k triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param work
* Auxiliary workspace array of length
* ldwork-by-m1 if side == PlasmaLeft
* ldwork-by-ib if side == PlasmaRight
*
* @param[in] ldwork
* The leading dimension of the array work.
* ldwork >= max(1,ib) if side == PlasmaLeft
* ldwork >= max(1,n1) if side == PlasmaRight
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
// Applies Q (or Q^H) from a TS LQ factorization to the tiles [A1 A2] / [A1; A2].
// Full contract is in the Doxygen block above; argument checks below return
// -i for the i-th illegal argument, in declaration order.
__attribute__((weak))
int plasma_core_ctsmlq(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
plasma_complex32_t *A1, int lda1,
plasma_complex32_t *A2, int lda2,
const plasma_complex32_t *V, int ldv,
const plasma_complex32_t *T, int ldt,
plasma_complex32_t *work, int ldwork)
{
// Check input arguments.
if (side != PlasmaLeft && side != PlasmaRight) {
plasma_coreblas_error("illegal value of side");
return -1;
}
if (trans != PlasmaNoTrans && trans != Plasma_ConjTrans) {
plasma_coreblas_error("illegal value of trans");
return -2;
}
if (m1 < 0) {
plasma_coreblas_error("illegal value of m1");
return -3;
}
if (n1 < 0) {
plasma_coreblas_error("illegal value of n1");
return -4;
}
if (m2 < 0 || (m2 != m1 && side == PlasmaRight)) {
plasma_coreblas_error("illegal value of m2");
return -5;
}
if (n2 < 0 || (n2 != n1 && side == PlasmaLeft)) {
plasma_coreblas_error("illegal value of n2");
return -6;
}
if (k < 0 ||
(side == PlasmaLeft && k > m1 ) ||
(side == PlasmaRight && k > n1)) {
plasma_coreblas_error("illegal value of k");
return -7;
}
if (ib < 0) {
plasma_coreblas_error("illegal value of ib");
return -8;
}
if (A1 == NULL) {
plasma_coreblas_error("NULL A1");
return -9;
}
if (lda1 < imax(1, m1)) {
plasma_coreblas_error("illegal value of lda1");
return -10;
}
if (A2 == NULL) {
plasma_coreblas_error("NULL A2");
return -11;
}
if (lda2 < imax(1, m2)) {
plasma_coreblas_error("illegal value of lda2");
return -12;
}
if (V == NULL) {
plasma_coreblas_error("NULL V");
return -13;
}
if (ldv < imax(1, k)) {
plasma_coreblas_error("illegal value of ldv");
return -14;
}
if (T == NULL) {
plasma_coreblas_error("NULL T");
return -15;
}
if (ldt < imax(1, ib)) {
plasma_coreblas_error("illegal value of ldt");
return -16;
}
if (work == NULL) {
plasma_coreblas_error("NULL work");
return -17;
}
if (ldwork < imax(1, side == PlasmaLeft ? ib : n1)) {
plasma_coreblas_error("illegal value of ldwork");
return -18;
}
// quick return
if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
return PlasmaSuccess;
// Choose the sweep direction over the ib-wide reflector panels (forward or
// backward) as required by the blocked LQ representation.
int i1, i3;
if ((side == PlasmaLeft && trans == PlasmaNoTrans) ||
(side == PlasmaRight && trans != PlasmaNoTrans)) {
i1 = 0;
i3 = ib;
}
else {
i1 = ((k-1)/ib)*ib;
i3 = -ib;
}
// For LQ, Q = H(k)^H ... H(1)^H, so applying Q means applying the
// conjugate-transposed panels — flip trans for the inner kernel.
if (trans == PlasmaNoTrans)
trans = Plasma_ConjTrans;
else
trans = PlasmaNoTrans;
// Loop condition covers both sweep directions: stop when i leaves [0, k).
for (int i = i1; i > -1 && i < k; i += i3) {
int kb = imin(ib, k-i);   // width of this panel (last one may be short)
int ic = 0;
int jc = 0;
int mi = m1;
int ni = n1;
if (side == PlasmaLeft) {
// H or H^H is applied to C(i:m,1:n).
mi = m1 - i;
ic = i;
}
else {
// H or H^H is applied to C(1:m,i:n).
ni = n1 - i;
jc = i;
}
// Apply H or H^H.
plasma_core_cparfb(side, trans, PlasmaForward, PlasmaRowwise,
mi, ni, m2, n2, kb, 0,
&A1[lda1*jc+ic], lda1,
A2, lda2,
&V[i], ldv,
&T[ldt*i], ldt,
work, ldwork);
}
return PlasmaSuccess;
}
/******************************************************************************/
void plasma_core_omp_ctsmlq(plasma_enum_t side, plasma_enum_t trans,
int m1, int n1, int m2, int n2, int k, int ib,
plasma_complex32_t *A1, int lda1,
plasma_complex32_t *A2, int lda2,
const plasma_complex32_t *V, int ldv,
const plasma_complex32_t *T, int ldt,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(inout:A1[0:lda1*n1]) \
depend(inout:A2[0:lda2*n2]) \
depend(in:V[0:ldv*n2]) \
depend(in:T[0:ib*k])
{
if (sequence->status == PlasmaSuccess) {
// Prepare workspaces.
int tid = omp_get_thread_num();
plasma_complex32_t *W = (plasma_complex32_t*)work.spaces[tid];
int ldwork = side == PlasmaLeft ? ib : n1; // TODO: float check
// Call the kernel.
int info = plasma_core_ctsmlq(side, trans,
m1, n1, m2, n2, k, ib,
A1, lda1,
A2, lda2,
V, ldv,
T, ldt,
W, ldwork);
if (info != PlasmaSuccess) {
plasma_error("core_ctsmlq() failed");
plasma_request_fail(sequence, request, PlasmaErrorInternal);
}
}
}
}
|
sorgqr.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zungqr.c, normal z -> s, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_ungqr
*
* Generates an m-by-n matrix Q with orthonormal columns, which
* is defined as the first n columns of a product of the elementary reflectors
* returned by plasma_sgeqrf.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix Q. m >= 0.
*
* @param[in] n
* The number of columns of the matrix Q. m >= n >= 0.
*
* @param[in] k
* The number of columns of elementary tile reflectors whose product
* defines the matrix Q.
* n >= k >= 0.
*
* @param[in] pA
* Details of the QR factorization of the original matrix A as returned
* by plasma_sgeqrf, where the k first columns are the reflectors.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[in] T
* Auxiliary factorization data, computed by plasma_sgeqrf.
*
* @param[out] pQ
* On exit, pointer to the m-by-n matrix Q.
*
* @param[in] ldq
* The leading dimension of the array Q. ldq >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_sorgqr
* @sa plasma_cungqr
* @sa plasma_dorgqr
* @sa plasma_sorgqr
* @sa plasma_sgeqrf
*
******************************************************************************/
/*
 * Generates the m-by-n orthonormal matrix Q from a plasma_sgeqrf
 * factorization (see Doxygen block above for the full contract).
 *
 * FIX: on plasma_workspace_create() failure the tile descriptors A and Q
 * were leaked; they are now destroyed on that error path.
 */
int plasma_sorgqr(int m, int n, int k,
                  float *pA, int lda,
                  plasma_desc_t T,
                  float *pQ, int ldq)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments (error codes follow the public API positions).
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0 || n > m) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (k < 0 || k > n) {
        plasma_error("illegal value of k");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldq < imax(1, m)) {
        plasma_error("illegal value of ldq");
        return -8;
    }
    // quick return
    if (n <= 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geqrf(plasma, PlasmaRealFloat, m, n);
    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    // Create tile matrices (only the first k columns hold reflectors).
    plasma_desc_t A;
    plasma_desc_t Q;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, k, &Q);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmqr: work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // FIX: release the tile descriptors created above (previously leaked).
        plasma_desc_destroy(&Q);
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pQ, ldq, Q, &sequence, &request);
        // Call the tile async function.
        plasma_omp_sorgqr(A, T, Q, work, &sequence, &request);
        // Translate Q back to LAPACK layout.
        plasma_omp_sdesc2ge(Q, pQ, ldq, &sequence, &request);
    }
    // implicit synchronization
    plasma_workspace_destroy(&work);
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&Q);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_ungqr
*
* Non-blocking tile version of plasma_sorgqr().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[in] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by plasma_sgeqrf.
*
* @param[out] Q
* Descriptor of matrix Q. On exit, matrix Q stored in the tile layout.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For multiplication by Q contains preallocated space for work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_sorgqr
* @sa plasma_omp_cungqr
* @sa plasma_omp_dorgqr
* @sa plasma_omp_sorgqr
* @sa plasma_omp_sgeqrf
*
******************************************************************************/
void plasma_omp_sorgqr(plasma_desc_t A, plasma_desc_t T, plasma_desc_t Q,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request first: every later failure is reported
    // through plasma_request_fail(sequence, request, ...), so both must be
    // non-NULL before they are used.  (The original checked them last,
    // after already passing them to plasma_request_fail().)
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(Q) != PlasmaSuccess) {
        plasma_error("invalid Q");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (Q.n <= 0)
        return;
    // Set Q to identity.
    plasma_pslaset(PlasmaGeneral, 0.0, 1.0, Q, sequence, request);
    // Construct Q, using the tree-reduction variant when the context
    // requests it, otherwise the flat panel algorithm.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_psorgqr_tree(A, T, Q, work, sequence, request);
    }
    else {
        plasma_psorgqr(A, T, Q, work, sequence, request);
    }
}
|
functionparameter-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// Arrays passed as function parameters
/* Write half of each element of c into the corresponding slot of o1.
 * The iterations are independent, so the loop is parallelized. */
void foo1(double o1[], double c[], int len)
{
#pragma omp parallel for
    for (int idx = 0; idx < len; ++idx) {
        o1[idx] = 0.5 * c[idx];
    }
}
/* Global input/output buffers handed to foo1. */
double o1[100];
double c[100];

/* Entry point: exercise foo1 over the full 100-element buffers. */
int main()
{
    foo1(o1, c, 100);
    return 0;
}
|
jacobi_float_avx2.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <math.h>
#define REAL float
/* Return wall-clock time in milliseconds since the epoch.
 * Uses C11 timespec_get() instead of the obsolete ftime(): same units and
 * epoch as the original, but ftime() is deprecated and limited to
 * millisecond resolution. */
static double read_timer_ms() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec * 1000.0 + (double) ts.tv_nsec / 1.0e6;
}
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define DEFAULT_DIMSIZE 256
/* Dump the n-by-m row-major matrix A to stdout, one grid row per line,
 * each entry labelled "name[i][j]:value" and the whole block titled. */
void print_array(char *title, char *name, REAL *A, int n, int m) {
    printf("%s:\n", title);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < m; col++) {
            printf("%s[%d][%d]:%f ", name, row, col, A[row * m + col]);
        }
        printf("\n");
    }
    printf("\n");
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Initialize the Jacobi problem on an n-by-m grid over [-1,1] x [-1,1]:
 * computes the grid spacings *dx and *dy, zeroes the solution u_p, and
 * fills the right-hand side f_p assuming the exact solution
 * u(x,y) = (1-x^2)*(1-y^2). */
void initialize(int n, int m, REAL alpha, REAL *dx, REAL *dy, REAL *u_p, REAL *f_p) {
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    *dx = (2.0 / (n - 1));
    *dy = (2.0 / (m - 1));
    /* Fill initial condition and RHS point by point.  xx/yy are the
     * truncated-to-int coordinates, exactly as in the original code. */
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            int xx = (int) (-1.0 + (*dx * (i - 1)));
            int yy = (int) (-1.0 + (*dy * (j - 1)));
            u[i][j] = 0.0;
            f[i][j] = (((((-1.0 * alpha) * (1.0 - (xx * xx)))
                * (1.0 - (yy * yy))) - (2.0 * (1.0 - (xx * xx))))
                - (2.0 * (1.0 - (yy * yy))));
        }
    }
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* Compute and print the normalized L2 error between the numerical solution
 * u_p and the exact solution (1-x^2)*(1-y^2) over the n-by-m grid. */
void error_check(int n, int m, REAL alpha, REAL dx, REAL dy, REAL *u_p, REAL *f_p) {
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;  /* unused here; kept for symmetry */
    REAL error = 0.0;
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            REAL xx = (-1.0 + (dx * (i - 1)));
            REAL yy = (-1.0 + (dy * (j - 1)));
            REAL diff = (u[i][j] - ((1.0 - (xx * xx)) * (1.0 - (yy * yy))));
            error = (error + (diff * diff));
        }
    }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
/*
 * Driver for the Jacobi Helmholtz solver.
 * Parses optional positional arguments [<n> <m> <alpha> <tol> <relax> <mits>],
 * runs jacobi_seq and jacobi_omp num_runs times each (after one untimed
 * warm-up pass), reports the average elapsed time per run, and checks the
 * sequential solution against the exact solution.
 */
int main(int argc, char *argv[]) {
    int n = DEFAULT_DIMSIZE;      /* grid dimension in x */
    int m = DEFAULT_DIMSIZE;      /* grid dimension in y */
    REAL alpha = 0.0543;          /* Helmholtz constant (> 0) */
    REAL tol = 0.0000000001;      /* solver tolerance */
    REAL relax = 1.0;             /* over-relaxation parameter */
    int mits = 5000;              /* max iterations */
    int num_runs = 20;            /* timed repetitions per solver */
    /* Arguments are parsed only when the exact count matches, preserving
     * the original CLI behavior; "%g" matches REAL == float. */
    if (argc == 2) {
        sscanf(argv[1], "%d", &n);
        m = n;
    }
    else if (argc == 3) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
    }
    else if (argc == 4) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
    }
    else if (argc == 5) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
    }
    else if (argc == 6) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
    }
    else if (argc == 7) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
        sscanf(argv[6], "%d", &mits);
    }
    else {
        /* the rest of arg ignored */
    }
    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");
    /* Allocate solution (u), OpenMP solution (uomp) and RHS (f) grids. */
    REAL *u = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *uomp = (REAL *) malloc(sizeof(REAL) * n * m);
    REAL *f = (REAL *) malloc(sizeof(REAL) * n * m);
    if (u == NULL || uomp == NULL || f == NULL) {
        /* Fix: the original used these buffers without checking malloc. */
        fprintf(stderr, "Out of memory allocating %d x %d grids\n", n, m);
        free(u);
        free(uomp);
        free(f);
        return 1;
    }
    REAL dx; /* grid spacing in x direction */
    REAL dy; /* grid spacing in y direction */
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(REAL) * n * m);
    /* Warm-up runs (not timed). */
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(REAL) * n * m);
    double elapsed = 0;
    for (int i = 0; i < num_runs; i++) {  /* fix: bound was a hard-coded 20 */
        double start = read_timer_ms();
        jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
        elapsed += read_timer_ms() - start;
    }
    printf("seq elasped time(ms): %4f\n", elapsed/num_runs);
    puts("================");
    double elapsed2 = 0;
    for (int i = 0; i < num_runs; i++) {  /* fix: bound was a hard-coded 20 */
        double start = read_timer_ms();
        jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
        elapsed2 += read_timer_ms() - start;
    }
    printf("OpenMP elasped time(ms): %4f\n", elapsed2/num_runs);
    /* Verify the sequential result against the exact solution. */
    error_check(n, m, alpha, dx, dy, u, f);
    free(u);
    free(f);
    free(uomp);
    return 0;
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* mits Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
/* Sequential Jacobi iteration for the Helmholtz equation.
 * Input:  n,m grid size; dx,dy spacing; alpha Helmholtz coefficient;
 *         omega relaxation factor; u_p solution grid (in/out); f_p RHS;
 *         tol convergence tolerance; mits maximum iterations.
 * Output: u_p holds the solution; iteration count and residual printed. */
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error;
    REAL ax;
    REAL ay;
    REAL b;
    REAL resid;
    /* Fix: the previous-iteration copy was an n*m VLA, which overflows the
     * stack for large grids; allocate it on the heap, matching jacobi_omp. */
    REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
    if (tmp == NULL) {
        fprintf(stderr, "jacobi_seq: out of memory\n");
        return;
    }
    REAL (*uold)[m] = (REAL (*)[m]) tmp;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    /* Initialize coefficients. */
    ax = (1.0 / (dx * dx));                                  /* X-direction */
    ay = (1.0 / (dy * dy));                                  /* Y-direction */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);  /* central */
    error = (10.0 * tol);
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        /* Relax every interior point against its four neighbors. */
        for (i = 1; i < (n - 1); i++)
            for (j = 1; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) +
                         b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
/* OpenMP SIMD-vectorized Jacobi iteration; same numerics as jacobi_seq,
 * with the inner j-loops vectorized (simdlen(8), i.e. 8 floats per lane
 * group) and the residual accumulated via a SIMD reduction.
 * Input/output contract is identical to jacobi_seq. */
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
    int i, j, k;
    REAL error;
    REAL ax;
    REAL ay;
    REAL b;
    REAL resid;
    REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
    if (tmp == NULL) {
        /* Fix: the original dereferenced the buffer without checking malloc. */
        fprintf(stderr, "jacobi_omp: out of memory\n");
        return;
    }
    REAL (*uold)[m] = (REAL (*)[m]) tmp;
    REAL (*u)[m] = (REAL (*)[m]) u_p;
    REAL (*f)[m] = (REAL (*)[m]) f_p;
    /* Initialize coefficients. */
    ax = (1.0 / (dx * dx));                                  /* X-direction */
    ay = (1.0 / (dy * dy));                                  /* Y-direction */
    b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);  /* central */
    error = (10.0 * tol);
    k = 1;
    while ((k <= mits) && (error > tol)) {
        error = 0.0;
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
            #pragma omp simd simdlen(8)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        for (i = 1; i < (n - 1); i++)
            #pragma omp simd simdlen(8) reduction(+:error)
            for (j = 1; j < (m - 1); j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) +
                         b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        error = sqrt(error) / (n * m);
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
|
trans.c | /*Daala video codec
Copyright (c) 2013 Daala project contributors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
#include <omp.h>
#include <stdlib.h>
#include "od_defs.h"
#include "od_filter.h"
#include "stats_tools.h"
#include "trans_tools.h"
#include "int_search.h"
#include "kiss99.h"
#define USE_FILES (0)
#define USE_AR95 (1)
#define USE_SUBSET1 (0)
#define USE_SUBSET3 (0)
#define PRINT_COV (0)
#define CG_SEARCH (0)
#define USE_SIMPLEX (1)
#define RAMP_DYADIC (0)
#if CG_SEARCH
# if USE_TYPE3 && RAMP_DYADIC
# error "Dyadic ramp constraint not supported for Type-III transform."
# endif
# if USE_SIMPLEX && RAMP_DYADIC
# error "Dyadic ramp constraint not supported with simplex search."
# endif
/* Search the lapped-transform filter parameter space for the set that
 * maximizes the 1-D collapsed coding gain, printing each new best candidate
 * as it is found.
 * _r: collapsed 1-D autocorrelation, 2*B_SZ entries.
 * Compiled only when CG_SEARCH is enabled; the strategy is selected by
 * USE_SIMPLEX and RAMP_DYADIC and by the block size B_SZ. */
static void coding_gain_search(const double _r[2*B_SZ]){
/* Exhaustive (brute-force) search variants. */
# if !USE_SIMPLEX
# if B_SZ==4
{
/* f holds the candidate filter parameters passed to
 * coding_gain_1d_collapsed: f[0..1] scales s0,s1; f[2] p0; f[3] q0. */
int f[4];
int p0;
int q0;
int s0;
int s1;
double cg;
double best_cg;
best_cg=0;
# if RAMP_DYADIC
/* Dyadic ramp constraint: the scale factors s0,s1 are derived from p0,q0
 * (divisibility checks below) so only integral dyadic ramps are visited. */
for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
int t0;
f[3]=q0;
/* S0 = 4/1*(1-q0/64)
* S0 >= 1 -> 64-q0 >= 16
*/
t0=(1<<FILTER_BITS)-q0;
s0=1*t0-0;
if(s0>=(1<<FILTER_BITS-2)){
s0*=4;
f[0]=s0;
for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
f[2]=p0;
/* S1 = 4/3*(1-(1-q0/64)*p0/64)
* S1 >= 1 -> 64^2-(64-q0)*p0 >= 64*48
* S1 = x/64 -> 64^2-(64-q0)*p0 = 0 MOD 48
*/
s1=(1<<2*FILTER_BITS)-t0*p0;
if(s1>=(1<<FILTER_BITS)*(3<<FILTER_BITS-2)&&s1%(3<<FILTER_BITS-2)==0){
s1/=(3<<FILTER_BITS-2);
f[1]=s1;
cg=coding_gain_1d_collapsed(_r,f);
if(cg>best_cg){
best_cg=cg;
printf("%i %i %i %i %G\n",p0,q0,s0,s1,cg);
}
}
}
}
}
# else
/* Unconstrained exhaustive scan over p0, q0, s0, s1. */
for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
f[2]=p0;
for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
f[3]=q0;
for(s0=(1<<FILTER_BITS);s0<=2*(1<<FILTER_BITS);s0++){
f[0]=s0;
for(s1=(1<<FILTER_BITS);s1<=2*(1<<FILTER_BITS);s1++){
f[1]=s1;
cg=coding_gain_1d_collapsed(_r,f);
if(cg>best_cg){
best_cg=cg;
printf("%i %i %i %i %G\n",p0,q0,s0,s1,cg);
}
}
}
}
}
# endif
}
# elif B_SZ==8
{
/* For B_SZ==8: f[0..3] scales s0..s3, f[4..6] p0..p2, f[7..9] q0..q2. */
int f[10];
int p0;
int p1;
int p2;
int q0;
int q1;
int q2;
int s0;
int s1;
int s2;
int s3;
double cg;
double best_cg;
best_cg=0;
# if RAMP_DYADIC
/* Nested dyadic-ramp search: each scale s_k is derived from the p/q
 * parameters chosen so far and accepted only when the divisibility
 * constraints in the comments below hold. */
for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
int t0;
f[7]=q0;
/* S0 = 8/1*(1-q0/64)
* S0 >= 1 -> 64-q0 >= 8
*/
t0=(1<<FILTER_BITS)-q0;
s0=1*t0-0;
if(s0>=(1<<FILTER_BITS-3)){
s0*=8;
f[0]=s0;
for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
f[4]=p0;
for(q1=(1<<FILTER_BITS);q1>=-(1<<FILTER_BITS);q1--){
int t1;
f[8]=q1;
/* S1 = 8/3*((1-q1/64)-(1-q0/64)*p0/64)
* S1 >= 1 -> 64*t1-t0*p0 >= 64*24
* S1 = x/64 -> 64*t1-t0*p0 = 0 MOD 24
*/
t1=(1<<FILTER_BITS)-q1;
s1=(1<<FILTER_BITS)*t1-t0*p0;
if(s1>=(1<<FILTER_BITS)*(3<<FILTER_BITS-3)&&
s1%(3<<FILTER_BITS-3)==0){
s1/=(3<<FILTER_BITS-3);
f[1]=s1;
for(p1=-(1<<FILTER_BITS);p1<=(1<<FILTER_BITS);p1++){
f[5]=p1;
for(q2=(1<<FILTER_BITS);q2>=-(1<<FILTER_BITS);q2--){
int t2;
f[9]=q2;
/* S2 = 8/5*((1-q2/64)-(1-q1/64)*p1/64)
* S2 >= 1 -> 64*t2-t1*p1) >= 64*40
* S2 = x/64 -> 64*t2-t1*p1 = 0 MOD 40
*/
t2=(1<<FILTER_BITS)-q2;
s2=(1<<FILTER_BITS)*t2-t1*p1;
if(s2>=(1<<FILTER_BITS)*(5<<FILTER_BITS-3)&&
s2%(5<<FILTER_BITS-3)==0){
s2/=(5<<FILTER_BITS-3);
f[2]=s2;
for(p2=-(1<<FILTER_BITS);p2<=(1<<FILTER_BITS);p2++){
f[6]=p2;
/* S3 = 8/7*(1-(1-q2/64)*p2/64)
* S3 >= 1 -> 64^2-t2*p2 >= 64*56
* S3 = x/64 -> 64^2-t2*p2 = 0 MOD 56
*/
s3=(1<<2*FILTER_BITS)-t2*p2;
if(s3>=(1<<FILTER_BITS)*(7<<FILTER_BITS-3)&&
s3%(7<<FILTER_BITS-3)==0){
s3/=(7<<FILTER_BITS-3);
f[3]=s3;
cg=coding_gain_1d_collapsed(_r,f);
if(cg>best_cg){
best_cg=cg;
printf("%i %i %i %i %i %i %i %i %i %i %-24.18G\n",
p0,p1,p2,q0,q1,q2,s0,s1,s2,s3,cg);
}
}
}
}
}
}
}
}
}
}
}
# else
# error "Exhaustive search for B_SZ==8 only supported using RAMP_DYADIC (1)."
# endif
}
# else
# error "Exhaustive search not supported for this block size."
# endif
# else
/* Randomized multi-start simplex search: 128 random starting points,
 * each refined with int_simplex_max; iterations run in parallel with
 * per-thread kiss99 RNG state so results are reproducible per thread. */
{
int dims;
int i;
kiss99_ctx ks[NUM_PROCS];
int lb[22];
int ub[22];
# if B_SZ==4
dims=4;
# elif B_SZ==8
dims=10;
# elif B_SZ==16
dims=22;
# else
# error "Unsupported block size."
# endif
/* First half of the parameters are scales in [64,128]; the rest are
 * ramp parameters in [-64,64] (for FILTER_BITS==6). */
for(i=0;i<dims;i++){
lb[i]=i<(B_SZ>>1)?(1<<FILTER_BITS):-(1<<FILTER_BITS);
ub[i]=i<(B_SZ>>1)?2*(1<<FILTER_BITS):(1<<FILTER_BITS);
}
for(i=0;i<NUM_PROCS;i++){
uint32_t srand;
srand=i*16843009; /*Broadcast char to 4xchar*/
kiss99_srand(&ks[i],(unsigned char *)&srand,sizeof(srand));
}
#pragma omp parallel for schedule(dynamic)
for(i=0;i<128;i++){
int tid;
int j;
# if B_SZ==4
int f[4];
# elif B_SZ==8
int f[10];
# elif B_SZ==16
int f[22];
# else
# error "Unsupported block size."
# endif
double cg;
tid=omp_get_thread_num();
/* Draw a uniform random starting point in [lb,ub] per dimension by
 * rejection sampling on a power-of-two mask. */
for(j=0;j<dims;j++){
int range;
int mask;
int rng;
range=ub[j]-lb[j];
mask=(1<<OD_ILOG_NZ(range))-1;
do {
rng=((int)kiss99_rand(&ks[tid]))&mask;
}
while(rng>range);
f[j]=lb[j]+rng;
}
j=int_simplex_max(&cg,dims,coding_gain_1d_collapsed,_r,lb,ub,f);
fprintf(stdout,"obj=%-24.18G steps=%4d params={",cg,j);
for(j=0;j<dims;j++){
fprintf(stdout,"%3d%c",f[j],j==dims-1?'}':',');
}
fprintf(stdout,"\n");
}
}
# endif
}
#endif
#if USE_FILES
/* Accumulate 1-D row and column statistics from every video file named in
 * _argv[1..], one file per loop iteration, iterations distributed over
 * OpenMP threads.  Each thread accumulates into its own trans_ctx slot
 * (_ctx[tid]), so no locking is needed; the caller combines the per-thread
 * sums afterwards.  Unreadable files are reported to stderr and skipped.
 * Returns EXIT_SUCCESS unconditionally. */
int apply(trans_ctx *_ctx,int _argc,const char *_argv[]){
int ai;
#pragma omp parallel for schedule(dynamic)
for(ai=1;ai<_argc;ai++){
FILE *fin;
video_input vid;
th_info ti;
th_ycbcr_buffer ycbcr;
int tid;
trans_ctx *ctx;
int x0,y0,x1,y1;
fin=fopen(_argv[ai],"rb");
if(fin==NULL){
fprintf(stderr,"Could not open '%s' for reading.\n",_argv[ai]);
continue;
}
/* NOTE(review): fin is not fclose'd on the two failure paths below, and
 * it is unclear from here whether video_input_open/video_input_close take
 * ownership of the FILE* — confirm against video_input's contract and add
 * fclose(fin) if they do not. */
if(video_input_open(&vid,fin)<0){
fprintf(stderr,"Error reading video info from '%s'.\n",_argv[ai]);
continue;
}
video_input_get_info(&vid,&ti);
if(video_input_fetch_frame(&vid,ycbcr,NULL)<0){
fprintf(stderr,"Error reading first frame from '%s'.\n",_argv[ai]);
continue;
}
tid=omp_get_thread_num();
ctx=_ctx+tid;
/* Active picture region of the frame. */
x0 = ti.pic_x;
y0 = ti.pic_y;
x1 = x0 + ti.pic_width;
y1 = y0 + ti.pic_height;
/* start */
fprintf(stderr,"%s\n",_argv[ai]);
image_ctx_init(&ctx->img,_argv[ai],-1,-1);
/* map */
{
int stride=ycbcr[0].stride;
const unsigned char *data=ycbcr[0].data;
int x,y,z;
unsigned char buf[2*B_SZ];
/* Feed every horizontal 2*B_SZ window of the luma plane. */
/* add the rows */
for(y=y0;y<y1;y++){
for(x=x0;x<x1-(2*B_SZ_MAX-1);x++){
for(z=0;z<2*B_SZ;z++){
buf[z]=data[y*stride+(x+z)];
}
trans_data_add(&ctx->td,buf);
}
}
/* Feed every vertical 2*B_SZ window of the luma plane. */
/* add the columns */
for(y=y0;y<y1-(2*B_SZ_MAX-1);y++){
for(x=x0;x<x1;x++){
for(z=0;z<2*B_SZ;z++){
buf[z]=data[(y+z)*stride+x];
}
trans_data_add(&ctx->td,buf);
}
}
}
video_input_close(&vid);
}
return EXIT_SUCCESS;
}
#endif
/* Driver: selects the reference filter parameters for the configured block
 * size, obtains a 1-D autocorrelation — from input video files (USE_FILES),
 * an AR(1) model with rho=0.95 (USE_AR95), or precomputed subset matrices
 * (USE_SUBSET1/USE_SUBSET3) — then either searches for better filter
 * parameters (CG_SEARCH) or just prints the resulting coding gain. */
int main(int _argc,const char *_argv[]){
trans_ctx ctx[NUM_PROCS];
const int *f;
int i;
double r[2*B_SZ];
const double *cov;
(void)_argc;
(void)_argv;
/* Reference filter parameters for the current block size. */
#if B_SZ==4
f=OD_FILTER_PARAMS4;
#elif B_SZ==8
f=OD_FILTER_PARAMS8;
#elif B_SZ==16
f=OD_FILTER_PARAMS16;
#else
# error "Need filter params for this block size."
#endif
for(i=0;i<NUM_PROCS;i++){
trans_data_init(&ctx[i].td,2*B_SZ);
}
/* cov aliases r unless a precomputed subset matrix is selected below. */
cov=r;
#if USE_FILES
/* Gather statistics from the input files, merge the per-thread sums,
 * and report the coding gain before/after collapsing the covariance. */
omp_set_num_threads(NUM_PROCS);
apply(ctx,_argc,_argv);
for(i=1;i<NUM_PROCS;i++){
trans_data_combine(&ctx[0].td,&ctx[i].td);
}
trans_data_normalize(&ctx[0].td);
# if PRINT_COV
trans_data_print(&ctx[0].td,stderr);
# endif
fprintf(stdout,"original cg=%- 24.16G\n",coding_gain_1d(ctx[0].td.cov,f));
trans_data_collapse(&ctx[0].td,1,r);
fprintf(stdout,"collapse cg=%- 24.16G\n",coding_gain_1d_collapsed(r,f));
trans_data_expand(&ctx[0].td,1,r);
fprintf(stdout,"expanded cg=%- 24.16G\n",coding_gain_1d(ctx[0].td.cov,f));
#elif USE_AR95
/* Synthetic AR(1) autocorrelation with coefficient 0.95. */
auto_regressive_collapsed(r,2*B_SZ,1,0.95);
#elif USE_SUBSET1
# if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
cov=SUBSET1_1D[B_SZ_LOG-OD_LOG_BSIZE0];
# else
# error "Need auto-correlation matrix for subset1 for this block size."
# endif
#elif USE_SUBSET3
# if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
cov=SUBSET3_1D[B_SZ_LOG-OD_LOG_BSIZE0];
# else
# error "Need auto-correlation matrix for subset3 for this block size."
# endif
#endif
#if CG_SEARCH
coding_gain_search(cov);
#else
fprintf(stdout,"cg=%-24.18G\n",coding_gain_1d_collapsed(cov,f));
#endif
for(i=0;i<NUM_PROCS;i++){
trans_data_clear(&ctx[i].td);
}
return EXIT_SUCCESS;
}
|
base_serialized.h | #include "callback.h"
#include <omp.h>
int main()
{
unsigned int i;
// Serialized parallel region: a one-thread team, so every OMPT parallel /
// implicit-task / loop callback fires on the master thread in a fixed order.
#pragma omp parallel for num_threads(1) schedule(SCHEDULE)
for (i = 0; i < 1; i++) {
}
// The lines below are FileCheck patterns matched against the OMPT callback
// trace emitted by the runtime; they must stay byte-identical.
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=0x{{[0-f]+}}, invoker={{.+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|
GB_AxB_flopcount.c | //------------------------------------------------------------------------------
// GB_AxB_flopcount: compute flops for C=A*B, C<M>=A*B, or C<!M>=A*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// On input, A, B, and M (optional) are matrices for C=A*B, C<M>=A*B, or
// C<!M>=A*B. The flop count for each B(:,j) is computed, and returned as a
// cumulative sum. This function is CSR/CSC agnostic, but for simplicity of
// this description, assume A and B are both CSC matrices, so that ncols(A) ==
// nrows(B). For both CSR and CSC, A->vdim == B->vlen holds. A and/or B may
// be hypersparse, in any combination.
// Bflops has size (B->nvec)+1, for both standard and hypersparse B. Let
// n=B->vdim be the column dimension of B (that is, B is m-by-n).
// If B is a standard CSC matrix then Bflops has size n+1 == B->nvec+1, and on
// output, Bflops [j] is the # of flops required to compute C (:, 0:j-1). B->h
// is NULL, and is implicitly the vector 0:(n-1).
// If B is hypersparse, then let Bh = B->h. Its size is B->nvec, and j = Bh
// [kk] is the (kk)th column in the data structure for B. C will also be
// hypersparse, and only C(:,Bh) will be computed (C may have fewer non-empty
// columns than B). On output, Bflops [kk] is the number of needed flops to
// compute C (:, Bh [0:kk-1]).
// In both cases, Bflops [0] = 0, and Bflops [B->nvec] = total number of flops.
// The size of Bflops is B->nvec+1 so that it has the same size as B->p. The
// first entry of B->p and Bflops are both zero. This allows B to be sliced
// either by # of entries in B (by slicing B->p) or by the flop count required
// (by slicing Bflops).
// This algorithm does not look at the values of M, A, or B, just their
// patterns. The flop count of C=A*B, C<M>=A*B, or C<!M>=A*B is computed for a
// saxpy-based method; the work for A'*B for the dot product method is not
// computed.
// The algorithm scans all nonzeros in B. It only scans at most the min and
// max (first and last) row indices in A and M (if M is present). If A and M
// are not hypersparse, the time taken is O(nnz(B)+n). If all matrices are
// hypersparse, the time is O(nnz(B)*log(h)) where h = max # of vectors present
// in A and M. In pseudo-MATLAB, and assuming B is in standard (not
// hypersparse) form:
/*
[m n] = size (B) ;
Bflops = zeros (1,n+1) ; % (set to zero in the caller)
Mwork = 0 ;
for each column j in B:
if (B (:,j) is empty) continue ;
mjnz = nnz (M (:,j))
if (M is present, not complemented, and M (:,j) is empty) continue ;
im_first = min row index of nonzeros in M(:,j)
im_last = max row index of nonzeros in M(:,j)
Bflops (j) = mjnz if M present, to scatter M(:,j) (M or !M case)
Mwork += mjnz
for each k where B (k,j) is nonzero:
aknz = nnz (A (:,k))
if (aknz == 0) continue ;
alo = min row index of nonzeros in A(:,k)
ahi = max row index of nonzeros in A(:,k)
if (M is present and not complemented)
if (intersection (alo:ahi, im_first:im_last) empty) continue
end
% numerical phase will compute: C(:,j)<#M(:,j)> += A(:,k)*B(k,j)
% where #M is no mask, M, or !M. This typically takes aknz flops,
% or with a binary search if nnz(M(:,j)) << nnz(A(:,k)).
Bflops (j) += aknz
end
end
*/
#include "GB_mxm.h"
#include "GB_ek_slice.h"
#include "GB_bracket.h"
#define GB_FREE_WORK \
{ \
GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ; \
GB_FREE_MEMORY (Wfirst, ntasks, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Wlast, ntasks, sizeof (int64_t)) ; \
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_AxB_flopcount
(
int64_t *Mwork, // amount of work to handle the mask M
int64_t *Bflops, // size B->nvec+1 and all zero
const GrB_Matrix M, // optional mask matrix
const bool Mask_comp, // if true, mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK_OR_NULL (M, "M for flop count A*B", GB0) ;
ASSERT_MATRIX_OK (A, "A for flop count A*B", GB0) ;
ASSERT_MATRIX_OK (B, "B for flop count A*B", GB0) ;
ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
ASSERT (A->vdim == B->vlen) ;
ASSERT (Bflops != NULL) ;
ASSERT (Mwork != NULL) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
int64_t bnz = GB_NNZ (B) ;
int64_t bnvec = B->nvec ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (bnz + bnvec, chunk, nthreads_max) ;
#ifdef GB_DEBUG
// Bflops must be set to zero in the caller
for (int64_t kk = 0 ; kk <= bnvec ; kk++)
{
ASSERT (Bflops [kk] == 0) ;
}
#endif
//--------------------------------------------------------------------------
// get the mask, if present
//--------------------------------------------------------------------------
bool mask_is_M = (M != NULL && !Mask_comp) ;
const int64_t *GB_RESTRICT Mh = NULL ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mi = NULL ;
int64_t mnvec = 0 ;
bool M_is_hyper = GB_IS_HYPER (M) ;
if (M != NULL)
{
Mh = M->h ;
Mp = M->p ;
Mi = M->i ;
mnvec = M->nvec ;
}
//--------------------------------------------------------------------------
// get A and B
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ai = A->i ;
int64_t anvec = A->nvec ;
bool A_is_hyper = GB_IS_HYPER (A) ;
const int64_t *GB_RESTRICT Bh = B->h ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bi = B->i ;
bool B_is_hyper = GB_IS_HYPER (B) ;
//--------------------------------------------------------------------------
// construct the parallel tasks
//--------------------------------------------------------------------------
// taskid does entries pstart_slice [taskid] to pstart_slice [taskid+1]-1
// and vectors kfirst_slice [taskid] to klast_slice [taskid]. The first
// and last vectors may be shared with prior slices and subsequent slices.
int64_t *GB_RESTRICT Wfirst = NULL ; // size ntasks
int64_t *GB_RESTRICT Wlast = NULL ; // size ntasks
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
ntasks = GB_IMIN (ntasks, bnz) ;
ntasks = GB_IMAX (ntasks, 1) ;
int64_t *pstart_slice, *kfirst_slice, *klast_slice ;
if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, B, ntasks))
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_MALLOC_MEMORY (Wfirst, ntasks, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (Wlast, ntasks, sizeof (int64_t)) ;
if (Wfirst == NULL || Wlast == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute flop counts for C=A*B, C<M>=A*B, or C<!M>=A*B
//--------------------------------------------------------------------------
int64_t total_Mwork = 0 ;
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:total_Mwork)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = kfirst_slice [taskid] ;
int64_t klast = klast_slice [taskid] ;
Wfirst [taskid] = 0 ;
Wlast [taskid] = 0 ;
int64_t mpleft = 0 ; // for GB_lookup of the mask M
int64_t task_Mwork = 0 ;
//----------------------------------------------------------------------
// count flops for vectors kfirst to klast of B
//----------------------------------------------------------------------
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
// nnz (B (:,j)), for all tasks
int64_t bjnz = Bp [kk+1] - Bp [kk] ;
// C(:,j) is empty if the entire vector B(:,j) is empty
if (bjnz == 0) continue ;
//------------------------------------------------------------------
// find the part of B(:,j) to be computed by this task
//------------------------------------------------------------------
int64_t pB, pB_end ;
GB_get_pA_and_pC (&pB, &pB_end, NULL,
taskid, kk, kfirst, klast, pstart_slice, NULL, NULL, Bp) ;
int64_t my_bjnz = pB_end - pB ;
int64_t j = (B_is_hyper) ? Bh [kk] : kk ;
//------------------------------------------------------------------
// see if M(:,j) is present and non-empty
//------------------------------------------------------------------
int64_t bjflops = 0 ;
int64_t im_first = -1, im_last = -1 ;
int64_t mjnz = 0 ;
if (M != NULL)
{
int64_t mpright = mnvec - 1 ;
int64_t pM, pM_end ;
GB_lookup (M_is_hyper, Mh, Mp, &mpleft, mpright, j,
&pM, &pM_end) ;
mjnz = pM_end - pM ;
// If M not complemented: C(:,j) is empty if M(:,j) is empty.
if (mjnz == 0 && !Mask_comp) continue ;
if (mjnz > 0)
{
// M(:,j) not empty; get 1st and last index in M(:,j)
im_first = Mi [pM] ;
im_last = Mi [pM_end-1] ;
if (pB == Bp [kk])
{
// this task owns the top part of B(:,j), so it can
// account for the work to access M(:,j), without the
// work being duplicated by other tasks working on
// B(:,j)
bjflops = mjnz ;
// keep track of total work spent examining the mask.
// If any B(:,j) is empty, M(:,j) can be ignored. So
// total_Mwork will be <= nnz (M).
task_Mwork += mjnz ;
}
}
}
int64_t mjnz_much = 64 * mjnz ;
//------------------------------------------------------------------
// trim Ah on right
//------------------------------------------------------------------
// Ah [0..A->nvec-1] holds the set of non-empty vectors of A, but
// only vectors k corresponding to nonzero entries B(k,j) are
// accessed for this vector B(:,j). If nnz (B(:,j)) > 2, prune the
// search space on the right, so the remaining calls to GB_lookup
// will only need to search Ah [pleft...pright-1]. pright does not
// change. pleft is advanced as B(:,j) is traversed, since the
// indices in B(:,j) are sorted in ascending order.
int64_t pleft = 0 ;
int64_t pright = anvec-1 ;
if (A_is_hyper && my_bjnz > 2)
{
// trim Ah [0..pright] to remove any entries past last B(:,j)
GB_bracket_right (Bi [pB_end-1], Ah, 0, &pright) ;
}
//------------------------------------------------------------------
// count the flops to compute C(:,j)<#M(:,j)> = A*B(:,j)
//------------------------------------------------------------------
// where #M is either not present, M, or !M
for ( ; pB < pB_end ; pB++)
{
// B(k,j) is nonzero
int64_t k = Bi [pB] ;
// find A(:,k), reusing pleft since Bi [...] is sorted
int64_t pA, pA_end ;
GB_lookup (A_is_hyper, Ah, Ap, &pleft, pright, k, &pA, &pA_end);
// skip if A(:,k) empty
int64_t aknz = pA_end - pA ;
if (aknz == 0) continue ;
double bkjflops ;
// skip if intersection of A(:,k) and M(:,j) is empty
// and mask is not complemented (C<M>=A*B)
if (mask_is_M)
{
// A(:,k) is non-empty; get first and last index of A(:,k)
int64_t alo = Ai [pA] ;
int64_t ahi = Ai [pA_end-1] ;
if (ahi < im_first || alo > im_last) continue ;
if (aknz > 256 && mjnz_much < aknz)
{
// scan M(:j), and do binary search for A(i,j)
bkjflops = mjnz * (1 + 4 * log2 ((double) aknz)) ;
}
else
{
// scan A(:k), and lookup M(i,j)
bkjflops = aknz ;
}
}
else
{
// A(:,k)*B(k,j) requires aknz flops
bkjflops = aknz ;
}
// increment by flops for the single entry B(k,j)
// C(:,j)<#M(:,j)> += A(:,k)*B(k,j).
bjflops += bkjflops ;
}
//------------------------------------------------------------------
// log the flops for B(:,j)
//------------------------------------------------------------------
if (kk == kfirst)
{
Wfirst [taskid] = bjflops ;
}
else if (kk == klast)
{
Wlast [taskid] = bjflops ;
}
else
{
Bflops [kk] = bjflops ;
}
}
// compute the total work to access the mask, which is <= nnz (M)
total_Mwork += task_Mwork ;
}
//--------------------------------------------------------------------------
// reduce the first and last vector of each slice
//--------------------------------------------------------------------------
// See also Template/GB_reduce_each_vector.c
int64_t kprior = -1 ;
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// sum up the partial flops that taskid computed for kfirst
//----------------------------------------------------------------------
int64_t kfirst = kfirst_slice [taskid] ;
int64_t klast = klast_slice [taskid] ;
if (kfirst <= klast)
{
int64_t pB = pstart_slice [taskid] ;
int64_t pB_end =
GB_IMIN (Bp [kfirst+1], pstart_slice [taskid+1]) ;
if (pB < pB_end)
{
if (kprior < kfirst)
{
// This task is the first one that did work on
// B(:,kfirst), so use it to start the reduction.
Bflops [kfirst] = Wfirst [taskid] ;
}
else
{
// subsequent task for B(:,kfirst)
Bflops [kfirst] += Wfirst [taskid] ;
}
kprior = kfirst ;
}
}
//----------------------------------------------------------------------
// sum up the partial flops that taskid computed for klast
//----------------------------------------------------------------------
if (kfirst < klast)
{
int64_t pB = Bp [klast] ;
int64_t pB_end = pstart_slice [taskid+1] ;
if (pB < pB_end)
{
/* if */ ASSERT (kprior < klast) ;
{
// This task is the first one that did work on
// B(:,klast), so use it to start the reduction.
Bflops [klast] = Wlast [taskid] ;
}
/*
else
{
// If kfirst < klast and B(:,klast) is not empty,
// then this task is always the first one to do
// work on B(:,klast), so this case is never used.
ASSERT (GB_DEAD_CODE) ;
// subsequent task to work on B(:,klast)
Bflops [klast] += Wlast [taskid] ;
}
*/
kprior = klast ;
}
}
}
//--------------------------------------------------------------------------
// cumulative sum of Bflops
//--------------------------------------------------------------------------
// Bflops = cumsum ([0 Bflops]) ;
ASSERT (Bflops [bnvec] == 0) ;
GB_cumsum (Bflops, bnvec, NULL, nthreads) ;
// Bflops [bnvec] is now the total flop count, including the time to
// compute A*B and to handle the mask. total_Mwork is part of this total
// flop count, but is also returned separately.
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
(*Mwork) = total_Mwork ;
return (GrB_SUCCESS) ;
}
|
test-zrocks.c | #include <omp.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libzrocks.h>
#include <xztl.h>
#include "CUnit/Basic.h"
/* Number of Objects */
#define TEST_N_BUFFERS 2
/* Number of random objects to read */
#define TEST_RANDOM_ID 2
/* Object Size */
#define TEST_BUFFER_SZ (1024 * 1024 * 16) /* 16 MB */
static uint8_t *wbuf[TEST_N_BUFFERS];
static uint8_t *rbuf[TEST_N_BUFFERS];
static const char **devname;
/* CU-assert that a pointer is non-NULL; on failure, print the name of the
 * API call that produced it so the failing step is visible in the log. */
static void cunit_zrocks_assert_ptr (char *fn, void *ptr)
{
    CU_ASSERT ((uint64_t) ptr != 0);
    if (ptr == NULL)
        printf ("\n %s: ptr %p\n", fn, ptr);
}
/* CU-assert that a status code is zero; on failure, print the name of the
 * API call and the (hex) status so the failing step is visible in the log. */
static void cunit_zrocks_assert_int (char *fn, uint64_t status)
{
    CU_ASSERT (status == 0);
    if (status != 0)
        printf ("\n %s: %lx\n", fn, status);
}
/* CUnit suite setup hook: nothing to initialize, always succeeds. */
static int cunit_zrocks_init (void)
{
return 0;
}
/* CUnit suite teardown hook: nothing to clean up, always succeeds. */
static int cunit_zrocks_exit (void)
{
return 0;
}
/* Open the zrocks library on the device path given on the command line. */
static void test_zrocks_init (void)
{
    int ret = zrocks_init (*devname);

    cunit_zrocks_assert_int ("zrocks_init", ret);
}
/* Close the zrocks library (pairs with test_zrocks_init). */
static void test_zrocks_exit (void)
{
zrocks_exit ();
}
/* Fill object @id's write buffer with a deterministic pattern: each 16-byte
 * chunk holds a byte value that increments per chunk (starting at 0x2,
 * wrapping at 0xFF), so corrupted reads are easy to localize. */
static void test_zrocks_fill_buffer (uint32_t id)
{
    uint8_t pattern = 0x1;
    uint32_t pos;

    for (pos = 0; pos < TEST_BUFFER_SZ; pos += 16) {
        pattern += 0x1;
        memset (&wbuf[id][pos], pattern, 16);
    }
}
/* Compare @size bytes of object @id's write and read buffers, starting at
 * byte offset @off.
 *
 * Returns 0 when the ranges match, non-zero otherwise (memcmp semantics:
 * the sign of the first differing byte pair, NOT a count of bad bytes). */
static int test_zrocks_check_buffer (uint32_t id, uint32_t off, uint32_t size)
{
    /* BUG FIX: the original ignored @off and always compared from byte 0.
     * All existing callers pass off == 0, so their behavior is unchanged. */
    return memcmp (wbuf[id] + off, rbuf[id] + off, size);
}
/* Create TEST_N_BUFFERS objects in parallel: allocate one DMA write buffer
 * per object, fill it with the known pattern, and register it via
 * zrocks_new. The write buffers stay allocated for later verification;
 * test_zrocks_random_read frees them at the end of the suite. */
static void test_zrocks_new (void)
{
uint32_t ids;
uint64_t id, phys[TEST_N_BUFFERS];
uint32_t size;
uint8_t level;
int ret[TEST_N_BUFFERS];
ids = TEST_N_BUFFERS;
size = TEST_BUFFER_SZ;
/* level 0: forwarded as-is to zrocks_new; semantics defined by libzrocks */
level = 0;
/* Each object is created by its own OpenMP thread */
#pragma omp parallel for
for (id = 0; id < ids; id++) {
/* Allocate DMA memory */
wbuf[id] = xztl_media_dma_alloc (size, &phys[id]);
cunit_zrocks_assert_ptr ("xztl_media_dma_alloc", wbuf[id]);
if (!wbuf[id])
continue;
test_zrocks_fill_buffer (id);
/* object IDs handed to zrocks are 1-based (id + 1) throughout this file */
ret[id] = zrocks_new (id + 1, wbuf[id], size, level);
cunit_zrocks_assert_int ("zrocks_new", ret[id]);
}
}
/* Read every object back in 64 KB chunks into a DMA read buffer and verify
 * its full content against the pattern written by test_zrocks_new.
 * The read buffer is freed per object; the write buffers are kept for
 * test_zrocks_random_read. */
static void test_zrocks_read (void)
{
    uint32_t ids, offset;
    uint64_t id, phys[TEST_N_BUFFERS];
    int ret[TEST_N_BUFFERS];
    size_t read_sz, size;

    ids     = TEST_N_BUFFERS;
    read_sz = 1024 * 64; /* 64 KB */
    size    = TEST_BUFFER_SZ;

    for (id = 0; id < ids; id++) {
        /* Allocate DMA memory */
        rbuf[id] = xztl_media_dma_alloc (size, &phys[id]);
        cunit_zrocks_assert_ptr ("xztl_media_dma_alloc", rbuf[id]);
        if (!rbuf[id])
            continue;

        memset (rbuf[id], 0x0, size);

        /* Read the whole object in read_sz-sized chunks */
        offset = 0;
        while (offset < size) {
            ret[id] = zrocks_read_obj (id + 1, offset, rbuf[id] + offset, read_sz);
            cunit_zrocks_assert_int ("zrocks_read_obj", ret[id]);
            if (ret[id])
                /* BUG FIX: offset is unsigned, print with %u (was %d) */
                printf ("Read error: ID %lu, offset %u, status: %x\n",
                        id + 1, offset, ret[id]);
            offset += read_sz;
        }

        ret[id] = test_zrocks_check_buffer (id, 0, TEST_BUFFER_SZ);
        cunit_zrocks_assert_int ("zrocks_read_obj:check", ret[id]);
        if (ret[id])
            /* BUG FIX: memcmp returns a sign, not a byte count — the old
             * message reported it as "corrupted: N bytes" */
            printf ("Corruption: ID %lu, memcmp status: %d\n", id + 1, ret[id]);

        xztl_media_dma_free (rbuf[id]);
    }
}
/* Read 4 fixed (offset, size) spans from object TEST_RANDOM_ID and compare
 * each span against the corresponding bytes of the original write buffer.
 * Also frees ALL write buffers at the end, so this test must run after
 * test_zrocks_new and test_zrocks_read. */
static void test_zrocks_random_read (void)
{
uint64_t id, phys;
uint64_t random_off[4] = {63, 24567, 175678, 267192};
size_t random_sz[4] = {532, 53, 2695, 1561};
//uint64_t random_off[1] = {24567};
//size_t random_sz[1] = {53};
int readi, ret;
uint8_t *buf, *woff;
id = TEST_RANDOM_ID;
/* 512 KB scratch buffer — larger than the biggest random span */
buf = xztl_media_dma_alloc (1024 * 512, &phys);
cunit_zrocks_assert_ptr ("xztl_media_dma_alloc", buf);
if (!buf)
return;
for (readi = 0; readi < 4; readi++) {
memset (buf, 0x0, random_sz[readi]);
ret = zrocks_read_obj (id, random_off[readi], buf, random_sz[readi]);
cunit_zrocks_assert_int ("zrocks_read_obj", ret);
/* wbuf is 0-indexed while object IDs are 1-based, hence id - 1 */
woff = &wbuf[id - 1][random_off[readi]];
/* Uncomment for a detailed read check (per-byte print)
printf (" \nMem check:\n");
for (int i = 0; i < random_sz[readi] + 4096; i++) {
if (i % 16 == 0)
printf("\n %lu-%lu ",
(i+random_off[readi]) - ((i+random_off[readi]) % 16) + random_off[readi] % 16,
((i+random_off[readi]) - ((i+random_off[readi]) % 16)) + 16 + random_off[readi] % 16);
printf (" %x/%x", woff[i], buf[i]);
}
printf("\n");
*/
cunit_zrocks_assert_int ("zrocks_read_obj:check",
memcmp (woff, buf, random_sz[readi]));
}
xztl_media_dma_free (buf);
/* Suite teardown for the write buffers allocated in test_zrocks_new */
for (int i = 0; i < TEST_N_BUFFERS; i++)
xztl_media_dma_free (wbuf[i]);
}
/* Entry point. argv[1] must name the target device (e.g. liou:/dev/nvme0n2).
 * Registers the CUnit suite and returns the number of failed tests
 * (0 on full success, -1 on bad usage, CUnit error code on setup failure). */
int main (int argc, const char **argv)
{
int failed;
if (argc < 2) {
printf ("Please provide the device path. e.g. liou:/dev/nvme0n2\n");
return -1;
}
devname = &argv[1];
printf ("Device: %s\n", *devname);
CU_pSuite pSuite = NULL;
if (CUE_SUCCESS != CU_initialize_registry())
return CU_get_error();
pSuite = CU_add_suite("Suite_zrocks", cunit_zrocks_init, cunit_zrocks_exit);
if (pSuite == NULL) {
CU_cleanup_registry();
return CU_get_error();
}
/* Registration order is execution order: init -> new -> read ->
 * random read -> exit. Later tests depend on earlier ones. */
if ((CU_add_test (pSuite, "Initialize ZRocks",
test_zrocks_init) == NULL) ||
(CU_add_test (pSuite, "ZRocks New",
test_zrocks_new) == NULL) ||
(CU_add_test (pSuite, "ZRocks Read",
test_zrocks_read) == NULL) ||
(CU_add_test (pSuite, "ZRocks Random Read",
test_zrocks_random_read) == NULL) ||
(CU_add_test (pSuite, "Close ZRocks",
test_zrocks_exit) == NULL)) {
CU_cleanup_registry();
return CU_get_error();
}
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
failed = CU_get_number_of_tests_failed();
CU_cleanup_registry();
return failed;
}
|
concurrent_unordered_map.cuh.h | /*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONCURRENT_UNORDERED_MAP_CUH
#define CONCURRENT_UNORDERED_MAP_CUH
#include <thrust/pair.h>
#include <cassert>
#include <iostream>
#include <iterator>
#include <type_traits>
#include "hash_functions.cuh"
#include "managed.cuh"
#include "managed_allocator.cuh"
// TODO: replace this with CUDA_TRY and propagate the error
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) { \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), \
cudaStatus); \
exit(1); \
} \
}
#endif
// TODO: can we do this more efficiently?
// 8-bit CAS emulated on top of the native 32-bit atomicCAS.
//
// BUG FIX: the original built the 32-bit comparand with the three
// neighboring bytes forced to zero, so the CAS spuriously failed whenever
// any neighbor byte was non-zero, and it returned the low byte of the
// containing word instead of the byte at `address`. This version keeps the
// neighbor bytes from the currently observed word and retries until the
// word-level CAS settles, then extracts the correct byte for the return.
__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare,
                                       int8_t val) {
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 3));
  unsigned int shift = ((size_t)address & 3) * 8;
  int32_t old_word = *base_address;
  int32_t assumed;
  do {
    assumed = old_word;
    // Stop as soon as the target byte no longer matches `compare`
    if ((int8_t)((assumed >> shift) & 0xff) != compare) break;
    int32_t new_word = (assumed & ~((int32_t)0xff << shift)) |
                       (((int32_t)val & 0xff) << shift);
    old_word = atomicCAS(base_address, assumed, new_word);
  } while (assumed != old_word);
  return (int8_t)((old_word >> shift) & 0xff);
}
// TODO: can we do this more efficiently?
// 16-bit CAS emulated on top of the native 32-bit atomicCAS; `address` is
// assumed 2-byte aligned, so (address & 2) selects the low or high half.
//
// BUG FIX: the original zeroed the other half-word in the comparand, so
// the CAS spuriously failed whenever that half was non-zero, and returned
// the low half of the word instead of the half at `address`. This version
// keeps the other half from the currently observed word, retries until the
// word-level CAS settles, and extracts the correct half for the return.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare,
                                        int16_t val) {
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 2));
  unsigned int shift = ((size_t)address & 2) * 8;
  int32_t old_word = *base_address;
  int32_t assumed;
  do {
    assumed = old_word;
    // Stop as soon as the target half-word no longer matches `compare`
    if ((int16_t)((assumed >> shift) & 0xffff) != compare) break;
    int32_t new_word = (assumed & ~((int32_t)0xffff << shift)) |
                       (((int32_t)val & 0xffff) << shift);
    old_word = atomicCAS(base_address, assumed, new_word);
  } while (assumed != old_word);
  return (int16_t)((old_word >> shift) & 0xffff);
}
// 64-bit signed CAS: forwards to the native unsigned long long overload;
// the casts only reinterpret bits, so the comparison is exact.
__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare,
int64_t val) {
return (int64_t)atomicCAS((unsigned long long*)address,
(unsigned long long)compare,
(unsigned long long)val);
}
// 64-bit unsigned CAS: forwards to the native unsigned long long overload.
__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare,
uint64_t val) {
return (uint64_t)atomicCAS((unsigned long long*)address,
(unsigned long long)compare,
(unsigned long long)val);
}
// long long CAS: forwards to the native unsigned long long overload
// (same width; the casts only reinterpret bits).
__inline__ __device__ long long int atomicCAS(long long int* address,
long long int compare,
long long int val) {
return (long long int)atomicCAS((unsigned long long*)address,
(unsigned long long)compare,
(unsigned long long)val);
}
// double CAS via bit reinterpretation: the comparison is on the 64-bit
// bit pattern, not numeric equality (e.g. -0.0 != +0.0 here, and a NaN
// only matches an identical NaN payload).
__inline__ __device__ double atomicCAS(double* address, double compare,
double val) {
return __longlong_as_double(atomicCAS((unsigned long long int*)address,
__double_as_longlong(compare),
__double_as_longlong(val)));
}
// float CAS via bit reinterpretation: the comparison is on the 32-bit
// bit pattern, not numeric equality (see the double overload above).
__inline__ __device__ float atomicCAS(float* address, float compare,
float val) {
return __int_as_float(
atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val)));
}
// 64-bit signed atomic add: forwards to the native unsigned long long
// overload; two's-complement wraparound makes the result bit-identical.
__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val) {
return (int64_t)atomicAdd((unsigned long long*)address,
(unsigned long long)val);
}
// 64-bit unsigned atomic add: forwards to the native unsigned long long
// overload.
__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val) {
return (uint64_t)atomicAdd((unsigned long long*)address,
(unsigned long long)val);
}
// Load a key/value pair with the widest single vector load whose size
// exactly matches sizeof(pair_type) (16/8/4/2 bytes), falling back to a
// plain (possibly multi-instruction) load otherwise. Type punning goes
// through a union to reinterpret the vector load as a pair.
// NOTE(review): the vector paths assume `ptr` is aligned to the vector
// width — confirm at the call site before reusing on arbitrary pointers.
template <typename pair_type>
__forceinline__ __device__ pair_type
load_pair_vectorized(const pair_type* __restrict__ const ptr) {
if (sizeof(uint4) == sizeof(pair_type)) {
// 16-byte pair: one uint4 load
union pair_type2vec_type {
uint4 vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0, 0, 0, 0};
converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
return converter.pair_val;
} else if (sizeof(uint2) == sizeof(pair_type)) {
// 8-byte pair: one uint2 load
union pair_type2vec_type {
uint2 vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0, 0};
converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
return converter.pair_val;
} else if (sizeof(int) == sizeof(pair_type)) {
// 4-byte pair: one int load
union pair_type2vec_type {
int vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0};
converter.vec_val = *reinterpret_cast<const int*>(ptr);
return converter.pair_val;
} else if (sizeof(short) == sizeof(pair_type)) {
// 2-byte pair: one short load
union pair_type2vec_type {
short vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0};
converter.vec_val = *reinterpret_cast<const short*>(ptr);
return converter.pair_val;
} else {
// Size does not match any vector type: ordinary load
return *ptr;
}
}
// Store a key/value pair with the widest single vector store whose size
// exactly matches sizeof(pair_type) (16/8/4/2 bytes), falling back to a
// plain store otherwise. Mirror of load_pair_vectorized; the same
// alignment caveat applies to `ptr`.
template <typename pair_type>
__forceinline__ __device__ void store_pair_vectorized(
pair_type* __restrict__ const ptr, const pair_type val) {
if (sizeof(uint4) == sizeof(pair_type)) {
// 16-byte pair: one uint4 store
union pair_type2vec_type {
uint4 vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0, 0, 0, 0};
converter.pair_val = val;
*reinterpret_cast<uint4*>(ptr) = converter.vec_val;
} else if (sizeof(uint2) == sizeof(pair_type)) {
// 8-byte pair: one uint2 store
union pair_type2vec_type {
uint2 vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0, 0};
converter.pair_val = val;
*reinterpret_cast<uint2*>(ptr) = converter.vec_val;
} else if (sizeof(int) == sizeof(pair_type)) {
// 4-byte pair: one int store
union pair_type2vec_type {
int vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0};
converter.pair_val = val;
*reinterpret_cast<int*>(ptr) = converter.vec_val;
} else if (sizeof(short) == sizeof(pair_type)) {
// 2-byte pair: one short store
union pair_type2vec_type {
short vec_val;
pair_type pair_val;
};
pair_type2vec_type converter = {0};
converter.pair_val = val;
*reinterpret_cast<short*>(ptr) = converter.vec_val;
} else {
// Size does not match any vector type: ordinary store
*ptr = val;
}
}
// Kernel: one thread per table slot writes the sentinel pair
// <key_val, elem_val> (i.e. <unused_key, unused_value>) into every entry,
// so later probes can recognize empty slots. Threads past `n` do nothing;
// the launch rounds the grid up to cover all n entries.
template <typename value_type, typename size_type, typename key_type,
typename elem_type>
__global__ void init_hashtbl( // Init every entry of the table with
// <unused_key, unused_value> pair
value_type* __restrict__ const hashtbl_values, const size_type n,
const key_type key_val, const elem_type elem_val) {
const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
store_pair_vectorized(
hashtbl_values + idx,
thrust::make_pair(
key_val, elem_val)); // Simply store every element a <K, V> pair
}
}
// Minimal std::equal_to replacement that is callable from both host and
// device code; used as the default key comparator of the map.
template <typename T>
struct equal_to {
using result_type = bool;
using first_argument_type = T;
using second_argument_type = T;
__forceinline__ __host__ __device__ constexpr bool operator()(
const first_argument_type& lhs, const second_argument_type& rhs) const {
return lhs == rhs;
}
};
// Iterator wrapper over the bucket array [m_begin, m_end) that wraps from
// m_end back to m_begin on increment — lets the hash table probe circularly
// from an arbitrary start bucket.
//
// NOTE(review): the const operator++ overloads modify m_current, which
// cannot compile if ever instantiated on a const object, and both postfix
// operators return a reference to the local `old` (dangling if the caller
// uses the result). Left as-is to preserve the declared interface; confirm
// they are unused in those ways before relying on them.
template <typename Iterator>
class cycle_iterator_adapter {
public:
using value_type = typename std::iterator_traits<Iterator>::value_type;
using difference_type =
typename std::iterator_traits<Iterator>::difference_type;
using pointer = typename std::iterator_traits<Iterator>::pointer;
using reference = typename std::iterator_traits<Iterator>::reference;
using iterator_type = Iterator;
cycle_iterator_adapter() = delete;
__host__ __device__ explicit cycle_iterator_adapter(
const iterator_type& begin, const iterator_type& end,
const iterator_type& current)
: m_begin(begin), m_end(end), m_current(current) {}
// Prefix increment: step forward, wrapping to m_begin at m_end
__host__ __device__ cycle_iterator_adapter& operator++() {
if (m_end == (m_current + 1))
m_current = m_begin;
else
++m_current;
return *this;
}
__host__ __device__ const cycle_iterator_adapter& operator++() const {
if (m_end == (m_current + 1))
m_current = m_begin;
else
++m_current;
return *this;
}
// Postfix increment (see NOTE(review) above about the return value)
__host__ __device__ cycle_iterator_adapter& operator++(int) {
cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
if (m_end == (m_current + 1))
m_current = m_begin;
else
++m_current;
return old;
}
__host__ __device__ const cycle_iterator_adapter& operator++(int)const {
cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
if (m_end == (m_current + 1))
m_current = m_begin;
else
++m_current;
return old;
}
// Two iterators are equal only if they share the same range AND position
__host__ __device__ bool equal(
const cycle_iterator_adapter<iterator_type>& other) const {
return m_current == other.m_current && m_begin == other.m_begin &&
m_end == other.m_end;
}
__host__ __device__ reference& operator*() { return *m_current; }
__host__ __device__ const reference& operator*() const { return *m_current; }
__host__ __device__ const pointer operator->() const {
return m_current.operator->();
}
__host__ __device__ pointer operator->() { return m_current; }
// Expose the raw underlying iterator (current bucket pointer)
__host__ __device__ iterator_type getter() const { return m_current; }
private:
iterator_type m_current;
iterator_type m_begin;
iterator_type m_end;
};
// Equality for cycle iterators: delegates to member equal() (range + position).
template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs,
const cycle_iterator_adapter<T>& rhs) {
return lhs.equal(rhs);
}
// Inequality for cycle iterators: logical negation of equal().
template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs,
const cycle_iterator_adapter<T>& rhs) {
return !lhs.equal(rhs);
}
/**
* Does support concurrent insert, but not concurrent insert and probing.
*
* TODO:
* - add constructor that takes pointer to hash_table to avoid allocations
* - extend interface to accept streams
*/
template <typename Key, typename Element, Key unused_key,
typename Hasher = default_hash<Key>,
typename Equality = equal_to<Key>,
typename Allocator = managed_allocator<thrust::pair<Key, Element>>,
bool count_collisions = false>
class concurrent_unordered_map : public managed {
public:
using size_type = size_t;
using hasher = Hasher;
using key_equal = Equality;
using allocator_type = Allocator;
using key_type = Key;
using value_type = thrust::pair<Key, Element>;
using mapped_type = Element;
using iterator = cycle_iterator_adapter<value_type*>;
using const_iterator = const cycle_iterator_adapter<value_type*>;
private:
union pair2longlong {
unsigned long long int longlong;
value_type pair;
};
public:
concurrent_unordered_map(const concurrent_unordered_map&) = delete;
concurrent_unordered_map& operator=(const concurrent_unordered_map&) = delete;
// Construct a table of n buckets: allocate the bucket array, prefetch it
// to the current GPU when it is CUDA managed memory, then launch
// init_hashtbl to fill every slot with <unused_key, unused_element> and
// synchronize before returning (so the table is usable immediately).
explicit concurrent_unordered_map(size_type n,
const mapped_type unused_element,
const Hasher& hf = hasher(),
const Equality& eql = key_equal(),
const allocator_type& a = allocator_type())
: m_hf(hf),
m_equal(eql),
m_allocator(a),
m_hashtbl_size(n),
m_hashtbl_capacity(n),
m_collisions(0),
m_unused_element(
unused_element) { // allocate the raw data of hash table:
// m_hashtbl_values,pre-alloc it on current GPU if UM.
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
constexpr int block_size = 128;
{
// Prefetch only applies to managed allocations; detection differs
// across CUDA runtime versions (type field vs. isManaged flag).
cudaPointerAttributes hashtbl_values_ptr_attributes;
cudaError_t status = cudaPointerGetAttributes(
&hashtbl_values_ptr_attributes, m_hashtbl_values);
#if CUDART_VERSION >= 10000
if (cudaSuccess == status &&
hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
{
int dev_id = 0;
CUDA_RT_CALL(cudaGetDevice(&dev_id));
CUDA_RT_CALL(cudaMemPrefetchAsync(
m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, 0));
}
}
// Initialize kernel, set all entry to unused <K,V>
init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size>>>(
m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element);
// CUDA_RT_CALL( cudaGetLastError() );
CUDA_RT_CALL(cudaStreamSynchronize(0));
CUDA_RT_CALL(cudaGetLastError());
}
// Release the bucket array; no device synchronization is performed here,
// so callers must ensure no kernel is still using the table.
~concurrent_unordered_map() {
m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
}
__host__ __device__ iterator begin() {
return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values);
}
__host__ __device__ const_iterator begin() const {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values);
}
__host__ __device__ iterator end() {
return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values + m_hashtbl_size);
}
__host__ __device__ const_iterator end() const {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values + m_hashtbl_size);
}
__host__ __device__ size_type size() const { return m_hashtbl_size; }
__host__ __device__ value_type* data() const { return m_hashtbl_values; }
__forceinline__ static constexpr __host__ __device__ key_type
get_unused_key() {
return unused_key;
}
// Generic update of a hash table value for any aggregator
template <typename aggregation_type>
__forceinline__ __device__ void update_existing_value(
mapped_type& existing_value, value_type const& insert_pair,
aggregation_type) {
// update without CAS
existing_value = insert_pair.second;
}
__forceinline__ __device__ void accum_existing_value_atomic(
mapped_type& existing_value, value_type const& accum_pair) {
// update with CAS
// existing_value = insert_pair.second;
int num_element =
sizeof(existing_value.data) / sizeof(*(existing_value.data));
const mapped_type& accumulator = accum_pair.second;
for (int i = 0; i < num_element; i++) {
atomicAdd(existing_value.data + i, accumulator.data[i]);
}
// atomicAdd(&existing_value, double val)
}
// TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload
// specifically for the
// types where atomicAdd already has an overload. Otherwise the generic
// update_existing_value will
// be used. Specialization for COUNT aggregator
/*
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<int32_t> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<int64_t> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<float> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<double> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
*/
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Inserts a new (key, value) pair. If the key already exists in
the map
an aggregation operation is performed with the new value and
existing value.
E.g., if the aggregation operation is 'max', then the maximum is
computed
between the new value and existing value and the result is
stored in the map.
*
* @Param[in] x The new (key, value) pair to insert
* @Param[in] op The aggregation operation to perform
* @Param[in] keys_equal An optional functor for comparing two keys
* @Param[in] precomputed_hash Indicates if a precomputed hash value is being
passed in to use
* to determine the write location of the new key
* @Param[in] precomputed_hash_value The precomputed hash value
* @tparam aggregation_type A functor for a binary operation that performs the
aggregation
* @tparam comparison_type A functor for comparing two keys
*
* @Returns An iterator to the newly inserted key,value pair
*/
/* ----------------------------------------------------------------------------*/
// Insert (key, value) with linear probing. If the key already exists,
// `op` selects how the existing value is combined with the new one via
// update_existing_value. The bucket index comes from `precomputed_hash_value`
// when `precomputed_hash` is true, otherwise from m_hf(x.first).
// Returns an iterator to the bucket that was written, or end() when a full
// cycle over the table found no usable slot (table full).
// NOTE(review): the returned iterator points at the bucket where the
// key-CAS succeeded; concurrent inserts of the same key aggregate into
// that same bucket.
template <typename aggregation_type, class comparison_type = key_equal,
typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ iterator insert(
const value_type& x, aggregation_type op,
comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
hash_value_type precomputed_hash_value = 0) {
const size_type hashtbl_size = m_hashtbl_size;
value_type* hashtbl_values = m_hashtbl_values;
hash_value_type hash_value{0};
// If a precomputed hash value has been passed in, then use it to determine
// the write location of the new key
if (true == precomputed_hash) {
hash_value = precomputed_hash_value;
}
// Otherwise, compute the hash value from the new key
else {
hash_value = m_hf(x.first);
}
size_type current_index = hash_value % hashtbl_size;
value_type* current_hash_bucket = &(hashtbl_values[current_index]);
const key_type insert_key = x.first;
bool insert_success = false;
size_type counter = 0;
while (false == insert_success) {
// Give up after probing every bucket once: the table is full
if (counter++ >= hashtbl_size) {
return end();
}
key_type& existing_key = current_hash_bucket->first;
mapped_type& existing_value = current_hash_bucket->second;
// Try and set the existing_key for the current hash bucket to insert_key
const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
// If old_key == unused_key, the current hash bucket was empty
// and existing_key was updated to insert_key by the atomicCAS.
// If old_key == insert_key, this key has already been inserted.
// In either case, perform the atomic aggregation of existing_value and
// insert_value
// Because the hash table is initialized with the identity value of the
// aggregation
// operation, it is safe to perform the operation when the existing_value
// still
// has its initial value
// TODO: Use template specialization to make use of native atomic
// functions
// TODO: How to handle data types less than 32 bits?
if (keys_equal(unused_key, old_key) || keys_equal(insert_key, old_key)) {
update_existing_value(existing_value, x, op);
insert_success = true;
}
// Linear probing: advance to the next bucket, wrapping around
current_index = (current_index + 1) % hashtbl_size;
current_hash_bucket = &(hashtbl_values[current_index]);
}
return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
current_hash_bucket);
}
/* This function is not currently implemented
__forceinline__
__host__ __device__ iterator insert(const value_type& x)
{
const size_type hashtbl_size = m_hashtbl_size;
value_type* hashtbl_values = m_hashtbl_values;
const size_type key_hash = m_hf( x.first );
size_type hash_tbl_idx = key_hash%hashtbl_size;
value_type* it = 0;
while (0 == it) {
value_type* tmp_it = hashtbl_values + hash_tbl_idx;
#ifdef __CUDA_ARCH__
if ( std::numeric_limits<key_type>::is_integer &&
std::numeric_limits<mapped_type>::is_integer && sizeof(unsigned long long int)
== sizeof(value_type)
)
{
pair2longlong converter = {0ull};
converter.pair = thrust::make_pair( unused_key, m_unused_element
);
const unsigned long long int unused = converter.longlong;
converter.pair = x;
const unsigned long long int value = converter.longlong;
const unsigned long long int old_val = atomicCAS(
reinterpret_cast<unsigned long long
int*>(tmp_it), unused, value ); if ( old_val == unused ) { it = tmp_it;
}
else if ( count_collisions )
{
atomicAdd( &m_collisions, 1 );
}
} else {
const key_type old_key = atomicCAS( &(tmp_it->first), unused_key,
x.first );
if ( m_equal( unused_key, old_key ) ) {
(m_hashtbl_values+hash_tbl_idx)->second = x.second;
it = tmp_it;
}
else if ( count_collisions )
{
atomicAdd( &m_collisions, 1 );
}
}
#else
#pragma omp critical
{
if ( m_equal( unused_key, tmp_it->first ) ) {
hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first,
x.second );
it = tmp_it;
}
}
#endif
hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;
}
return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);
}
*/
// Look up key `k` by linear probing. Returns a const_iterator to the
// matching bucket, or end() (pointer one past the table) when the probe
// reaches an unused slot or has scanned the whole table without a match.
// NOTE(review): the `counter > m_hashtbl_size` bound allows one probe more
// than the table size before giving up — confirm this is intentional.
__forceinline__ __host__ __device__ const_iterator
find(const key_type& k) const {
size_type key_hash = m_hf(k);
size_type hash_tbl_idx = key_hash % m_hashtbl_size;
value_type* begin_ptr = 0;
size_type counter = 0;
while (0 == begin_ptr) {
value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx;
// Read the key exactly once; other threads may be inserting concurrently.
const key_type tmp_val = tmp_ptr->first;
if (m_equal(k, tmp_val)) {
begin_ptr = tmp_ptr;
break;
}
// An unused slot terminates the probe chain: the key cannot be present.
if (m_equal(unused_key, tmp_val) || counter > m_hashtbl_size) {
begin_ptr = m_hashtbl_values + m_hashtbl_size;
break;
}
hash_tbl_idx = (hash_tbl_idx + 1) % m_hashtbl_size;
++counter;
}
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
begin_ptr);
}
// Device-side combined get-or-insert with linear probing:
//  * empty slot   -> claim it via atomicCAS on the key and publish a fresh
//                    dense index taken from *value_counter;
//  * matching key -> spin until the inserting thread has published the
//                    value, then return an iterator to that bucket;
//  * table full   -> give up after hashtbl_size probes and return end().
// `op` is accepted for interface symmetry but the aggregation call is
// commented out in this variant; the mapped value is the dense index.
template <typename aggregation_type, typename counter_type,
class comparison_type = key_equal,
typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ iterator get_insert(
const key_type& k, aggregation_type op, counter_type* value_counter,
comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
hash_value_type precomputed_hash_value = 0) {
const size_type hashtbl_size = m_hashtbl_size;
value_type* hashtbl_values = m_hashtbl_values;
hash_value_type hash_value{0};
// If a precomputed hash value has been passed in, then use it to determine
// the write location of the new key
if (true == precomputed_hash) {
hash_value = precomputed_hash_value;
}
// Otherwise, compute the hash value from the new key
else {
hash_value = m_hf(k);
}
size_type current_index = hash_value % hashtbl_size;
value_type* current_hash_bucket = &(hashtbl_values[current_index]);
const key_type insert_key = k;
bool insert_success = false;
size_type counter = 0;
while (false == insert_success) {
// Situation %5: No slot: All slot in the hashtable is occupied by other
// key, both get and
// insert fail. Return empty iterator
if (counter++ >= hashtbl_size) {
return end();
}
key_type& existing_key = current_hash_bucket->first;
// volatile so the spin-wait below re-reads the value published by the
// inserting thread instead of a cached register copy.
volatile mapped_type& existing_value = current_hash_bucket->second;
// Try and set the existing_key for the current hash bucket to insert_key
const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
// If old_key == unused_key, the current hash bucket was empty
// and existing_key was updated to insert_key by the atomicCAS.
// If old_key == insert_key, this key has already been inserted.
// In either case, perform the atomic aggregation of existing_value and
// insert_value
// Because the hash table is initialized with the identity value of the
// aggregation
// operation, it is safe to perform the operation when the existing_value
// still
// has its initial value
// TODO: Use template specialization to make use of native atomic
// functions
// TODO: How to handle data types less than 32 bits?
// Situation #1: Empty slot: this key never exist in the table, ready to
// insert.
if (keys_equal(unused_key, old_key)) {
// update_existing_value(existing_value, x, op);
existing_value = (mapped_type)(atomicAdd(value_counter, 1));
break;
} // Situation #2+#3: Target slot: This slot is the slot for this key
else if (keys_equal(insert_key, old_key)) {
while (existing_value == m_unused_element) {
// Situation #2: This slot is inserting by another CUDA thread and the
// value is not yet
// ready, just wait
}
// Situation #3: This slot is already ready, get successfully and return
// (iterator of) the
// value
break;
}
// Situation 4: Wrong slot: This slot is occupied by other key, get fail,
// do nothing and
// linear probing to next slot.
current_index = (current_index + 1) % hashtbl_size;
current_hash_bucket = &(hashtbl_values[current_index]);
}
return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
current_hash_bucket);
}
int assign_async(const concurrent_unordered_map& other,
gpuStream_t stream = 0) {
m_collisions = other.m_collisions;
if (other.m_hashtbl_size <= m_hashtbl_capacity) {
m_hashtbl_size = other.m_hashtbl_size;
} else {
m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
m_hashtbl_capacity = other.m_hashtbl_size;
m_hashtbl_size = other.m_hashtbl_size;
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
}
CUDA_RT_CALL(cudaMemcpyAsync(m_hashtbl_values, other.m_hashtbl_values,
m_hashtbl_size * sizeof(value_type),
cudaMemcpyDefault, stream));
return 0;
}
// Asynchronously reset every bucket to (unused_key, m_unused_element) by
// relaunching the init kernel on `stream`; also zeroes the collision
// counter when collision counting is compiled in.
void clear_async(gpuStream_t stream = 0) {
constexpr int block_size = 128;
// ceil(m_hashtbl_size / block_size) blocks of block_size threads each.
init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size, 0,
stream>>>(m_hashtbl_values, m_hashtbl_size, unused_key,
m_unused_element);
if (count_collisions) m_collisions = 0;
}
// Number of CAS collisions recorded by the insert paths (only meaningful
// when count_collisions is enabled).
unsigned long long get_num_collisions() const { return m_collisions; }
void print() {
for (size_type i = 0; i < 10; ++i) {
std::cout << i << ": " << m_hashtbl_values[i].first << ","
<< m_hashtbl_values[i].second << std::endl;
}
}
// Hint the driver to migrate the bucket storage (when it is managed /
// unified memory) and this map object itself to device `dev_id`.
// Returns 0; errors inside CUDA_RT_CALL abort per that macro's policy.
int prefetch(const int dev_id, gpuStream_t stream = 0) {
cudaPointerAttributes hashtbl_values_ptr_attributes;
cudaError_t status = cudaPointerGetAttributes(
&hashtbl_values_ptr_attributes, m_hashtbl_values);
// CUDA 10 renamed the attribute used to detect managed memory.
#if CUDART_VERSION >= 10000
if (cudaSuccess == status &&
hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
{
CUDA_RT_CALL(cudaMemPrefetchAsync(m_hashtbl_values,
m_hashtbl_size * sizeof(value_type),
dev_id, stream));
}
// NOTE(review): `this` is prefetched unconditionally — assumes the map
// object itself lives in managed memory; confirm against the allocator.
CUDA_RT_CALL(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream));
return 0;
}
// Locate x.first in the table and atomically fold x's mapped value into
// the stored value via accum_existing_value_atomic. Returns an iterator
// to the bucket, or end() when the key is absent (nothing accumulated).
// The comparator / precomputed-hash parameters are accepted for interface
// symmetry with get_insert but are not consulted here.
template <class comparison_type = key_equal,
typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ const_iterator
accum(const value_type& x, comparison_type keys_equal = key_equal(),
bool precomputed_hash = false,
hash_value_type precomputed_hash_value = 0) {
const_iterator pos = find(x.first);
if (!(pos == end())) {
value_type* bucket = pos.getter();
accum_existing_value_atomic(bucket->second, x);
}
return pos;
}
private:
// Immutable configuration, fixed at construction time.
const hasher m_hf;
const key_equal m_equal;
const mapped_type m_unused_element;
allocator_type m_allocator;
// Table geometry: size is the active bucket count; capacity is the
// allocated bucket count (capacity >= size, see assign_async).
size_type m_hashtbl_size;
size_type m_hashtbl_capacity;
value_type* m_hashtbl_values;
// CAS-collision counter, updated by insert paths when count_collisions is set.
unsigned long long m_collisions;
};
#endif // CONCURRENT_UNORDERED_MAP_CUH
|
target_enter_data_map_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - -x c++ %s
// Exercises the map-type restrictions of '#pragma omp target enter data':
// only 'to' and 'alloc' (optionally with the 'always' modifier) are valid;
// every other map type must be diagnosed. Diagnostics are checked by the
// expected-error annotations consumed by -verify.
int main(int argc, char **argv) {
int r;
#pragma omp target enter data // expected-error {{expected at least one map clause for '#pragma omp target enter data'}}
#pragma omp target enter data map(r) // expected-error {{map type must be specified for '#pragma omp target enter data'}}
#pragma omp target enter data map(tofrom: r) // expected-error {{map type 'tofrom' is not allowed for '#pragma omp target enter data'}}
#pragma omp target enter data map(always, to: r)
#pragma omp target enter data map(always, alloc: r)
#pragma omp target enter data map(always, from: r) // expected-error {{map type 'from' is not allowed for '#pragma omp target enter data'}}
#pragma omp target enter data map(release: r) // expected-error {{map type 'release' is not allowed for '#pragma omp target enter data'}}
#pragma omp target enter data map(delete: r) // expected-error {{map type 'delete' is not allowed for '#pragma omp target enter data'}}
return 0;
}
|
saber_avx512_funcs.h |
#ifndef ANAKIN_SABER_FUNCS_IMPL_X86_SABER_AVX512_FUNCS_H
#define ANAKIN_SABER_FUNCS_IMPL_X86_SABER_AVX512_FUNCS_H
#if defined(__AVX512F__)
#include "saber_normal_activation.h"
namespace anakin {
namespace saber {
// Apply the element-wise Sigmoid to `in[0..length)` writing to `out`,
// 16 floats (one __m512) at a time, with a masked tail for the remainder.
// Fixes vs. the original:
//  * the vector loop iterated while i < length, so when length % 16 != 0
//    the final iteration read and wrote up to 15 floats past both buffers;
//    it must stop at round_length and leave the tail to the masked path;
//  * `temp` was passed uninitialized as the masked-load source, reading
//    indeterminate lanes; zero-initialize it instead.
void avx512_vector_sigmoid(const float* in, int length, float* out) {
const int simd_length = 16;
int remainder = length % simd_length;
int round_length = length / simd_length * simd_length;
#pragma omp parallel for schedule(static)
for (int i = 0; i < round_length; i += simd_length) {
__m512 temp = Sigmoid(_mm512_loadu_ps(&in[i]));
_mm512_storeu_ps(&out[i], temp);
}
if (remainder > 0) {
// Mask with the low `remainder` lanes enabled.
__mmask16 vec_mask = 0xffff;
vec_mask = vec_mask >> (simd_length - remainder);
__m512 temp = _mm512_setzero_ps();
temp = _mm512_mask_loadu_ps(temp, vec_mask, &in[round_length]);
_mm512_mask_storeu_ps(&out[round_length], vec_mask, Sigmoid(temp));
}
}
}
}
#endif
#endif //ANAKIN_SABER_AVX512_FUNCS_H
|
command_dist.c | // Copyright 2019 Huiguang Yi. All Rights Reservered.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "command_dist.h"
#include "command_shuffle.h"
#include "iseq2comem.h"
#include "co2mco.h"
#include "mytime.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dirent.h>
#include <err.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/sysinfo.h>
#include <math.h>
#include <tgmath.h>
#include <time.h>
#include <unistd.h>
#include <zlib.h>
#include <stdbool.h>
#include <malloc.h>
#include <libgen.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Module-wide state and helpers for the `dist` command. */
mem_dispatch_t mem_dispatch = {0,0,0,0,0,0,0, OBJ_TRADITION , STD , MAKE_REF ,0};
mem_usage_stat_t mem_usage_stat = { 0, 0, 1e7 };
/* Remaining memory budget (bytes), decremented as buffers are planned. */
static llint real_time_mem;
/* Well-known file/dir names used throughout this module. */
const char co_dstat[] = "cofiles.stat";
const char mco_dstat[] = "mcofiles.stat";
const char distoutdir[] = "dist";
const char logfpath[] = "dist_log.out";
FILE *logfp;
/* Per-bin distance accumulator: one counter per reference in the bin. */
typedef int mco_co_dist_t[BIN_SZ];
typedef unsigned int ctx_obj_ct_t;
static inline unsigned int * mco_co_mmpdist_core(gidobj_t** unit_arrmco, char *co_fcode_in, unsigned int *ctx_obj_ct_in );
static inline void mco_co_dist_core(gidobj_t** unit_arrmco, char *co_fcode_in, int bin_sz, mco_co_dist_t shared_ctx_num_in);
#define LINE_LEN 1024
/* Buffered output line used by the distance printer. */
typedef struct prt_line { int len; char line[LINE_LEN]; } prt_line_t ;
static inline void output_ctrl (unsigned int X_size, unsigned int XnY_size, print_ctrl_t* outfield, char *rname, prt_line_t* linebuf ) ;
/* Top-level driver for the `dist` command.
 * Based on the option set it decides whether to (1) build a reference
 * sketch database from raw .fas/.fq input under -r, (2) reuse an existing
 * .co/.mco reference, and/or (3) sketch/compare the query inputs. The
 * worker-thread count p_fit_mem is budgeted against the remaining memory
 * (real_time_mem) so that p hash tables fit. Returns 0. */
int dist_dispatch(dist_opt_val_t *opt_val)
{
logfp = fpathopen(opt_val->outdir, logfpath,"a") ;
if( opt_val->mmry == 0 )
opt_val->mmry = get_sys_mmry();
/* Memory already spoken for = (requested - currently free) - bookkeeping. */
llint base_mem = (llint)( (opt_val->mmry - get_sys_mmry()) * BBILLION ) - mem_usage_stat.others ;
real_time_mem = base_mem + (llint)(get_sys_mmry()*BBILLION) ;
fprintf(logfp,"availbe mem.=%lf\treal time mem.=%llu\n", get_sys_mmry(), real_time_mem);
int p_fit_mem = 1;
/* --- Reference side (-r): build or reuse a reference database. --- */
if( ( opt_val->refpath[0] != '\0' ) )
{
const char *refco_dstat_fpath = test_get_fullpath(opt_val->refpath,co_dstat);
const char *refmco_dstat_fpath = test_get_fullpath(opt_val->refpath,mco_dstat);
/* Neither a .co nor a .mco stat file: refpath holds raw sequences. */
if( (refco_dstat_fpath == NULL ) && ( refmco_dstat_fpath == NULL) ){
infile_tab_t *ref_stat = dist_organize_refpath(opt_val);
if(ref_stat->infile_num == 0)
err(errno,"no valid input .fas/.fq file or absent %s | %s in %s \n", co_dstat, mco_dstat, opt_val->refpath ) ;
infile_fmt_count_t* ref_fmt_count;
ref_fmt_count = infile_fmt_count(ref_stat);
if( (ref_stat->infile_num == 0) || (ref_fmt_count->fasta + ref_fmt_count->fastq != ref_stat->infile_num ))
err(errno,"not a valid input files: make sure input files are .fas/.fq format"
"or .co.num |.mco.num files with %s | %s in %s \n", co_dstat, mco_dstat, opt_val->refpath );
const char *dist_rslt_dir = opt_val->outdir;
const char *dist_refco_dir = mk_dist_rslt_dir(dist_rslt_dir,"qry");
int *shuffled_refname_ind = shuffleN( ref_stat->infile_num, 0 );
mem_usage_stat.input_file_name_sz = ref_stat->infile_num * ( sizeof(llong) + PATHLEN*sizeof(char) );
real_time_mem -= mem_usage_stat.input_file_name_sz;
dim_shuffle = get_dim_shuffle(opt_val);
hashsize = get_hashsz(dim_shuffle);
seq2co_global_var_initial();
mem_usage_stat.shuffled_subctx_arr_sz = (1LLU << 4*( dim_shuffle->dim_shuffle_stat.subk) )*sizeof(int);
real_time_mem -= mem_usage_stat.shuffled_subctx_arr_sz;
/* How many per-thread k-mer hash tables fit in the remaining budget. */
p_fit_mem = real_time_mem / ( (hashsize + 1) * sizeof(llong));
if( opt_val->p < p_fit_mem )
p_fit_mem = opt_val->p;
else if(p_fit_mem < 1)
err(errno,"dist_dispatch():\n"
" Kmer hashing need mem.(%lf G) exceed the mem. system or user provide (%lf G)\n"
" user can either consider specify more mem.(-m ) or use smaller k value ( -k)\n"
" or increase dimension reduction level ( -L)\n",
(double)hashsize * sizeof(llong)/1e9, opt_val->mmry);
size_t phash_mem = (llong)p_fit_mem * (hashsize + 1) * sizeof(llong);
real_time_mem -= phash_mem;
fprintf(logfp, "COMPONENT_SZ=%d\tcompnum=%d\t.fas:%d\t.fq:%d\tall:%d\nthreadnum:%d\tp hash mem. %lu\n",
COMPONENT_SZ,component_num,ref_fmt_count->fasta,ref_fmt_count->fastq,ref_stat->infile_num,p_fit_mem,phash_mem);
if(ref_fmt_count->fastq >0 )
fprintf(logfp,"quality filter=%d\tKmer Occrence filter=%d\n",opt_val->kmerqlty,opt_val->kmerocrs);
/* References never carry abundance (koc) data. */
opt_val->abundance = false;
const char *refcostat = run_stageI(opt_val, ref_stat, shuffled_refname_ind, dist_refco_dir, p_fit_mem);
run_stageII(refcostat,opt_val->p);
free(dim_shuffle->shuffled_dim);
real_time_mem += mem_usage_stat.shuffled_subctx_arr_sz;
}
/* .co stat present: only stage II (co -> mco database) is needed. */
else if ( refco_dstat_fpath != NULL )
run_stageII(refco_dstat_fpath,opt_val->p);
/* .mco database already built: nothing to do on the reference side. */
else if(refmco_dstat_fpath != NULL) {;}
free((char*)refco_dstat_fpath);
free((char*)refmco_dstat_fpath);
}
/* --- Query side: positional args and/or -f input. --- */
if ( (opt_val->num_remaining_args >0) || (opt_val->fpath[0] != '\0' ) )
{
const char *qryco_dstat_fpath = NULL;
const char *qrymco_dstat_fpath = NULL;
if((opt_val->pipecmd[0]=='\0') && (opt_val->num_remaining_args >0)){
qryco_dstat_fpath = test_get_fullpath(opt_val->remaining_args[0],co_dstat);
qrymco_dstat_fpath = test_get_fullpath(opt_val->remaining_args[0],mco_dstat);
}
/* Query-vs-reference search model (-r given). */
if(opt_val->refpath[0] != '\0'){
const char *ref_db = test_get_fullpath(opt_val->refpath, mco_dstat);
if(ref_db==NULL) err(errno,"need speficy the ref-sketch path for -r to run the query-ref search model");
FILE * ref_mco_stat_fp;
if ((ref_mco_stat_fp = fopen(ref_db,"rb")) == NULL) err(errno,"mco stat file:%s",ref_db );
mco_dstat_t mco_ref_dstat;
fread( &mco_ref_dstat, sizeof(mco_dstat_t),1,ref_mco_stat_fp );
fclose(ref_mco_stat_fp);
if( qryco_dstat_fpath != NULL ){
FILE *qry_co_stat_fp;
if (( qry_co_stat_fp = fopen(qryco_dstat_fpath,"rb")) == NULL) err(errno,"qry co stat file:%s",qryco_dstat_fpath);
co_dstat_t co_qry_dstat;
fread( &co_qry_dstat, sizeof(co_dstat_t), 1, qry_co_stat_fp);
/* Query and reference must share shuffle id and component count. */
if(co_qry_dstat.shuf_id != mco_ref_dstat.shuf_id )
err(errno, "qry shuf_id: %d not match ref shuf_id: %d\ntry regenerate .co dir and feed -s the .shuf"
"file used to generated ref database",co_qry_dstat.shuf_id,mco_ref_dstat.shuf_id);
else if(co_qry_dstat.comp_num != mco_ref_dstat.comp_num)
err(errno, "qry comp_num: %d not match ref comp_num: %d",co_qry_dstat.comp_num, mco_ref_dstat.comp_num);
mco_cbd_koc_compatible_dist(opt_val);
}
else if (qrymco_dstat_fpath != NULL)
err(errno,"when -r specified, the query sould not be .mco format, the valid query format shoulde be .fas/.fq file or .co");
else{
infile_tab_t *infile_stat = dist_organize_infiles(opt_val);
infile_fmt_count_t* qry_fmt_count;
qry_fmt_count = infile_fmt_count(infile_stat);
bool is_valid_fas_fq_in = (infile_stat->infile_num != 0) &&
(qry_fmt_count->fasta + qry_fmt_count->fastq == infile_stat->infile_num );
/* NOTE(review): valid raw-sequence queries fall through without any
 * action here — confirm whether sketching was intended in this branch. */
if(is_valid_fas_fq_in){
}
else err(errno,"please specify valid query genomes seq or .co file for database search");
}
}
/* No -r: queries stand alone (all-vs-all or merge). */
else if( qryco_dstat_fpath != NULL) {
if( opt_val->num_remaining_args == 1 ){
run_stageII(qryco_dstat_fpath, opt_val->p);
}
else if (opt_val->num_remaining_args > 1){
combine_queries(opt_val);
}
}
else if ( (qrymco_dstat_fpath != NULL) && (opt_val->num_remaining_args > 1 )){
}
else {
/* Raw sequence queries: sketch them (stage I only). */
infile_tab_t *infile_stat = dist_organize_infiles(opt_val);
infile_fmt_count_t* qry_fmt_count;
qry_fmt_count = infile_fmt_count(infile_stat);
bool is_valid_fas_fq_in = (infile_stat->infile_num != 0) &&
(qry_fmt_count->fasta + qry_fmt_count->fastq == infile_stat->infile_num );
if(is_valid_fas_fq_in || (opt_val->pipecmd[0] != '\0') ){
const char * dist_rslt_dir = opt_val->outdir;
const char *dist_co_dir = mk_dist_rslt_dir(dist_rslt_dir,"qry");
int *shuffled_refname_ind = shuffleN( infile_stat->infile_num, 0 );
mem_usage_stat.input_file_name_sz = infile_stat->infile_num * ( sizeof(llong) + PATHLEN*sizeof(char) );
real_time_mem -= mem_usage_stat.input_file_name_sz;
dim_shuffle = get_dim_shuffle(opt_val);
hashsize = get_hashsz(dim_shuffle);
seq2co_global_var_initial();
mem_usage_stat.shuffled_subctx_arr_sz = (1LLU << 4*( dim_shuffle->dim_shuffle_stat.subk) )*sizeof(int);
real_time_mem -= mem_usage_stat.shuffled_subctx_arr_sz;
p_fit_mem = real_time_mem / ( (hashsize + 1) * sizeof(llong));
if( opt_val->p < p_fit_mem ) p_fit_mem = opt_val->p;
else if(p_fit_mem < 1)
err(errno,"dist_dispatch():\n"
" Kmer hashing need mem.(%lf G) exceed the mem. system or user provide (%lf G)\n"
" user can either consider specify more mem.(-m ) or use smaller k value ( -k)\n"
" or increase dimension reduction level ( -L)\n",
(double)hashsize * sizeof(llong)/1e9, opt_val->mmry);
size_t phash_mem = (llong)p_fit_mem * (hashsize + 1) * sizeof(llong);
real_time_mem -= phash_mem ;
fprintf(logfp,".fas:%d\t.fq:%d\tall:%d\nthreadnum:%d\tp hash mem. %lu\n",
qry_fmt_count->fasta,qry_fmt_count->fastq, infile_stat->infile_num,p_fit_mem,phash_mem);
if(qry_fmt_count->fastq >0 )
fprintf(logfp,"quality filter=%d\tKmer Occrence filter=%d\n",opt_val->kmerqlty,opt_val->kmerocrs);
run_stageI(opt_val,infile_stat,shuffled_refname_ind,dist_co_dir,p_fit_mem);
}
else err(errno,"not valid raw seq format");
}
}
fclose(logfp);
return 0;
};
/* Resolve the dimension-shuffle file to use.
 * When opt_val_in->dr_file is empty, generate a fresh shuffle (random id
 * seeded from the current time), write it to "<outdir>/default.shuf"
 * (creating outdir if needed), and read that file back; otherwise read
 * the user-supplied .shuf file. Returns the parsed dim_shuffle_t
 * (ownership as defined by read_dim_shuffle_file).
 * Fix: the original strcpy/sprintf into fixed-size buffers were
 * unbounded — "%s.shuf" of a PATHLEN+9-byte prefix into a PATHLEN-byte
 * destination could overflow. Bounded snprintf is used instead. */
dim_shuffle_t *get_dim_shuffle( dist_opt_val_t *opt_val_in )
{
char shuf_infile_name_prefix[PATHLEN+9];
char shuf_infile_name[PATHLEN];
snprintf(shuf_infile_name, sizeof shuf_infile_name, "%s", opt_val_in->dr_file);
if( strcmp( shuf_infile_name, "" ) == 0 )
{
fprintf(logfp,"addLen=%d\tsubctxlen=%d\n",add_len_drlevel2subk(),add_len_drlevel2subk() + opt_val_in->dr_level);
srand ( time(NULL) );
dim_shuffle_stat_t dim_shuffle_stat =
{
rand(),
opt_val_in->k,
opt_val_in->dr_level + add_len_drlevel2subk(),
opt_val_in->dr_level,
};
/* Make sure the output directory exists before writing into it. */
struct stat outd;
if( (stat(opt_val_in->outdir, &outd) != 0 ) || (! S_ISDIR(outd.st_mode) ) )
mkdir(opt_val_in->outdir,0777);
snprintf(shuf_infile_name_prefix, sizeof shuf_infile_name_prefix, "%s/default",opt_val_in->outdir);
write_dim_shuffle_file( &dim_shuffle_stat,shuf_infile_name_prefix);
fprintf(logfp,"subcontext shuffled dimension file: %s.shuf created\n",shuf_infile_name_prefix);
snprintf(shuf_infile_name, sizeof shuf_infile_name, "%s.shuf",shuf_infile_name_prefix);
}
return read_dim_shuffle_file(shuf_infile_name);
}
/* Choose the k-mer hash-table size for the reduced context space.
 * The size is taken from the global `primer` table, indexed by the
 * bit-width of the reduced space minus CTX_SPC_USE_L and a fixed offset
 * of 7; aborts with a diagnostic (suggesting a corrected -k) when the
 * k/drlevel combination puts the index outside 0..24. */
int get_hashsz(dim_shuffle_t *dim_shuffle_in )
{
int kept_bits = 4*( dim_shuffle_in->dim_shuffle_stat.k - dim_shuffle_in->dim_shuffle_stat.drlevel );
int dim_reduce_rate = 1 << 4*dim_shuffle_in->dim_shuffle_stat.drlevel;
llong ctx_space_sz = 1LLU << kept_bits;
int primer_ind = kept_bits - CTX_SPC_USE_L - 7;
if(primer_ind < 0 || primer_ind > 24 ){
/* Suggest a k adjustment that would bring primer_ind back into range. */
int k_add;
if (primer_ind < 0)
k_add = 1 + (0 - primer_ind)/4;
else
k_add = - (1 + ( primer_ind - 24 )/4);
err(errno,"get_hashsz(): primer_ind: %d out of range(0 ~ 24), by formula:\n"
"int primer_ind = 4*(opt_val->k - dim_shuffle->dim_shuffle_stat.drlevel) - CTX_SPC_USE_L - 7\n"
"this might caused by too small or too large k\n"
"kmer length = %d\n"
"dim reduction level = %d\n"
"ctx_space size = %llu\n"
"CTX space usage limit = %lf\n\n"
"try rerun the program with option -k = %d",
primer_ind, dim_shuffle_in->dim_shuffle_stat.k, dim_shuffle_in->dim_shuffle_stat.drlevel,ctx_space_sz,
(double)1/(1 << CTX_SPC_USE_L),dim_shuffle_in->dim_shuffle_stat.k + k_add );
}
int hashsize_get = primer[primer_ind];
fprintf(logfp,"dimension reduced %d\n"
"ctx_space size=%llu\n"
"k=%d\n"
"drlevel=%d\n"
"primer_ind=%d\n"
"hashsize=%u\n",
dim_reduce_rate,ctx_space_sz,dim_shuffle_in->dim_shuffle_stat.k, dim_shuffle_in->dim_shuffle_stat.drlevel, primer_ind, hashsize_get);
return hashsize_get;
}
/* Check whether directory `parent_path` contains a readable file named
 * `dstat_f`. Returns a heap-allocated "<parent_path>/<dstat_f>" string
 * (caller frees) when the file can be opened, NULL when it cannot or when
 * parent_path is not a directory. Aborts via err() when parent_path
 * cannot be stat()ed at all.
 * Fix: the original allocated a fixed PATHLEN+1 buffer and built the
 * path with unbounded sprintf, overflowing for long paths; the buffer is
 * now sized from the actual operand lengths and malloc is checked. */
const char* test_get_fullpath(const char *parent_path, const char *dstat_f)
{
struct stat path_stat;
if( stat(parent_path, &path_stat) < 0 )
err(errno,"test_get_fullpath()::%s",parent_path);
if( !S_ISDIR(path_stat.st_mode) )
return NULL;
size_t need = strlen(parent_path) + strlen(dstat_f) + 2; /* '/' + NUL */
char* fullpath = malloc(need);
if (fullpath == NULL)
err(errno,"test_get_fullpath()::malloc");
snprintf(fullpath, need, "%s/%s", parent_path, dstat_f);
FILE *fp = fopen(fullpath,"rb");
if (fp != NULL){
fclose(fp);
return fullpath;
}
free(fullpath);
return NULL;
}
/* Stage I of `dist`: sketch every input sequence file into per-file .co
 * (or .koc with abundance) component files using p_fit_mem parallel
 * per-thread hash tables, then merge the per-file components into one
 * combined file + index per component, and finally write the cofiles.stat
 * summary. Returns the heap-allocated path of that stat file (caller
 * frees).
 * Fix: in the abundance merge loop `tmpco` was allocated per component
 * and never freed (the non-abundance merge loop frees its counterpart),
 * leaking the staging buffer once per component. */
const char * run_stageI (dist_opt_val_t *opt_val, infile_tab_t *seqfile_stat,
int* shuffled_seqfname_ind, const char *co_dir, int p_fit_mem)
{
/* One k-mer hash table per worker thread. */
llong **CO = malloc( p_fit_mem * sizeof(llong *) );
for(int i = 0; i< p_fit_mem; i++ ){
CO[i] = (llong *)malloc(hashsize * sizeof(llong) );
}
llong all_ctx_ct = 0 ;
ctx_obj_ct_t *ctx_ct_list = malloc(sizeof(ctx_obj_ct_t) * seqfile_stat->infile_num);
/* NOTE(review): when abundance is set, control still falls through to the
 * byread/else section below and decomposes the inputs again — confirm
 * whether these modes were meant to be mutually exclusive. */
if(opt_val->abundance){
#pragma omp parallel for num_threads(p_fit_mem) reduction(+:all_ctx_ct) schedule(guided)
for(int i = 0; i< seqfile_stat->infile_num; i++){
int tid = 0;
#ifdef _OPENMP
tid = omp_get_thread_num();
#endif
char* seqfname = seqfile_stat->organized_infile_tab[ shuffled_seqfname_ind[i] ].fpath;
char cofname[PATHLEN];
sprintf(cofname,"%s/%d.koc",co_dir,i);
printf("decomposing %s\n",seqfname) ;
llong *co;
if(isOK_fmt_infile(seqfname,fastq_fmt,FQ_FMT_SZ)){
co = fastq2koc(seqfname,CO[tid],opt_val->pipecmd, opt_val->kmerqlty);
ctx_ct_list[i] = write_fqkoc2file(cofname,co);
}
else{
err(errno,"run_stageI(): only .fastq format is allowed in abundance estimate mode");
}
all_ctx_ct += ctx_ct_list[i] ;
}
/* Merge the per-file .koc components into one combined file + index. */
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int c = 0; c < component_num; c++){
size_t *cof_index_in_cbdco = malloc( (seqfile_stat->infile_num + 1) * sizeof(size_t) );
char indexfname[PATHLEN]; char combined_cof[PATHLEN]; char cofname[PATHLEN];
FILE *com_cofp,*indexfp;
sprintf(combined_cof,"%s/combco.%d",co_dir,c);
sprintf(indexfname,"%s/combco.index.%d",co_dir,c);
if( (com_cofp = fopen(combined_cof,"wb")) == NULL) err(errno,"%s",combined_cof);
if( (indexfp = fopen(indexfname,"wb")) == NULL) err(errno,"%s",indexfname);
llong *tmpco = malloc( sizeof(llong) * (1 << (4*COMPONENT_SZ - CTX_SPC_USE_L)) );
struct stat cof_stat;
FILE *cofp;
cof_index_in_cbdco[0] = 0;
for(int i = 0; i< seqfile_stat->infile_num; i++){
sprintf(cofname,"%s/%d.koc.%d", co_dir,i,c);
if( (cofp = fopen(cofname,"rb")) == NULL) err(errno,"%s",cofname);
stat(cofname,&cof_stat);
int tmpkmerct = cof_stat.st_size / sizeof(llong);
cof_index_in_cbdco[i+1] = (size_t)cof_index_in_cbdco[i] + tmpkmerct;
fread(tmpco,sizeof(llong),tmpkmerct,cofp);
fwrite(tmpco,sizeof(llong),tmpkmerct,com_cofp);
fclose(cofp);
remove(cofname);
}
fclose(com_cofp);
free(tmpco); /* fix: staging buffer was leaked here in the original */
fwrite(cof_index_in_cbdco,sizeof(size_t), seqfile_stat->infile_num + 1, indexfp);
fclose(indexfp);
free(cof_index_in_cbdco);
}
}
if(opt_val->byread){
for(int i = 0; i< seqfile_stat->infile_num; i++){
char* seqfname = seqfile_stat->organized_infile_tab[ shuffled_seqfname_ind[i] ].fpath;
printf("decomposing %s by reads\n",seqfname) ;
reads2mco(seqfname, co_dir, opt_val->pipecmd);
}
}
else {
#pragma omp parallel for num_threads(p_fit_mem) reduction(+:all_ctx_ct) schedule(guided)
for(int i = 0; i< seqfile_stat->infile_num; i++){
int tid = 0;
#ifdef _OPENMP
tid = omp_get_thread_num();
#endif
char* seqfname = seqfile_stat->organized_infile_tab[ shuffled_seqfname_ind[i] ].fpath;
char cofname[PATHLEN];
sprintf(cofname,"%s/%d.co",co_dir,i);
printf("decomposing %s\n",seqfname) ;
llong *co;
if(isOK_fmt_infile(seqfname,fastq_fmt,FQ_FMT_SZ) || opt_val->pipecmd[0]!='\0'){
co = fastq2co(seqfname,CO[tid],opt_val->pipecmd,opt_val->kmerqlty,opt_val->kmerocrs);
ctx_ct_list[i] = write_fqco2file(cofname,co);
}
else{
co = fasta2co(seqfname,CO[tid],opt_val->pipecmd);
ctx_ct_list[i] = wrt_co2cmpn_use_inn_subctx(cofname,co);
}
all_ctx_ct += ctx_ct_list[i] ;
}
/* Merge the per-file .co components into one combined file + index. */
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int c = 0; c < component_num; c++){
size_t *cof_index_in_cbdco = malloc( (seqfile_stat->infile_num + 1) * sizeof(size_t) );
char indexfname[PATHLEN]; char combined_cof[PATHLEN]; char cofname[PATHLEN];
FILE *com_cofp,*indexfp;
sprintf(combined_cof,"%s/combco.%d",co_dir,c);
sprintf(indexfname,"%s/combco.index.%d",co_dir,c);
if( (com_cofp = fopen(combined_cof,"wb")) == NULL) err(errno,"%s",combined_cof);
if( (indexfp = fopen(indexfname,"wb")) == NULL) err(errno,"%s",indexfname);
unsigned int *tmpco = malloc( sizeof(unsigned int) * (1 << (4*COMPONENT_SZ - CTX_SPC_USE_L)) );
struct stat cof_stat;
FILE *cofp;
cof_index_in_cbdco[0] = 0;
for(int i = 0; i< seqfile_stat->infile_num; i++){
sprintf(cofname,"%s/%d.co.%d", co_dir, i, c);
if( (cofp = fopen(cofname,"rb")) == NULL) err(errno,"%s",cofname);
stat(cofname,&cof_stat);
int tmpkmerct = cof_stat.st_size/sizeof(unsigned int);
cof_index_in_cbdco[i+1] = (size_t)cof_index_in_cbdco[i] + tmpkmerct;
fread(tmpco,sizeof(unsigned int),tmpkmerct,cofp);
fwrite(tmpco,sizeof(unsigned int),tmpkmerct,com_cofp);
fclose(cofp);
remove(cofname);
}
fclose(com_cofp);
free(tmpco);
fwrite(cof_index_in_cbdco,sizeof(size_t), seqfile_stat->infile_num + 1, indexfp);
fclose(indexfp);
free(cof_index_in_cbdco);
}
}
for(int i = 0; i< p_fit_mem; i++ ) free(CO[i]);
free(CO);
/* Write the cofiles.stat summary: header, per-file context counts, names. */
co_dstat_t co_dstat_wrout;
co_dstat_wrout.shuf_id = dim_shuffle->dim_shuffle_stat.id ;
co_dstat_wrout.koc = opt_val->abundance ;
co_dstat_wrout.kmerlen = dim_shuffle->dim_shuffle_stat.k * 2;
co_dstat_wrout.dim_rd_len = dim_shuffle->dim_shuffle_stat.drlevel * 2 ;
co_dstat_wrout.comp_num = component_num ;
co_dstat_wrout.infile_num = seqfile_stat->infile_num;
co_dstat_wrout.all_ctx_ct = all_ctx_ct;
char *co_dstat_fullname = malloc(PATHLEN*sizeof(char) );
sprintf(co_dstat_fullname, "%s/%s",co_dir,co_dstat);
FILE *coutfp;
if ( ( coutfp = fopen(co_dstat_fullname,"wb")) == NULL ) err(errno,"%s",co_dstat_fullname);
fwrite(&co_dstat_wrout,sizeof(co_dstat_wrout),1,coutfp);
fwrite(ctx_ct_list,sizeof(ctx_obj_ct_t),co_dstat_wrout.infile_num,coutfp);
free(ctx_ct_list);
for(int i = 0; i< co_dstat_wrout.infile_num; i++)
fwrite(seqfile_stat->organized_infile_tab[ shuffled_seqfname_ind[i] ].fpath,PATHLEN,1,coutfp);
fclose(coutfp);
return (const char *)co_dstat_fullname;
}
/* Stage II of `dist`: turn a .co sketch directory (described by
 * co_dstat_fpath) into an inverted .mco reference database in a sibling
 * "ref" directory — copy the stat header, per-file counts and names into
 * mcofiles.stat, then invert the combined co files with
 * cdb_kmerf2kmerdb() using p_fit_mem threads. */
void run_stageII(const char * co_dstat_fpath, int p_fit_mem)
{
const char* dist_co_dir = get_pathname(co_dstat_fpath,co_dstat);
const char* dist_rslt_dir = malloc(PATHLEN*sizeof(char));
sprintf((char*)dist_rslt_dir,"%s/..",dist_co_dir);
const char* dist_mco_dir = mk_dist_rslt_dir(dist_rslt_dir,"ref");
const char* mco_dstat_fpath = malloc(PATHLEN*sizeof(char));
sprintf((char*)mco_dstat_fpath,"%s/%s",dist_mco_dir,mco_dstat);
FILE *co_stat_fp,*mco_stat_fp;
if( ( co_stat_fp = fopen(co_dstat_fpath,"rb")) == NULL ) err(errno,"run_stageII(():%s",co_dstat_fpath);
co_dstat_t co_dstat_readin;
/* NOTE(review): fread results are unchecked here and below — a truncated
 * stat file would propagate uninitialized fields. */
fread( &co_dstat_readin, sizeof(co_dstat_t),1,co_stat_fp );
/* koc (abundance) sketches cannot serve as a reference database. */
if (co_dstat_readin.koc) err(errno,"run_stageII(): can not build reference database use koc file, you may want provie these files as query");
if( ( mco_stat_fp = fopen(mco_dstat_fpath,"wb")) == NULL ) err(errno,"run_stageII(():%s",mco_dstat_fpath);
mco_dstat_t mco_dstat_writeout;
mco_dstat_writeout.shuf_id = co_dstat_readin.shuf_id;
mco_dstat_writeout.kmerlen = co_dstat_readin.kmerlen ;
mco_dstat_writeout.dim_rd_len = co_dstat_readin.dim_rd_len ;
mco_dstat_writeout.infile_num = co_dstat_readin.infile_num;
mco_dstat_writeout.comp_num = co_dstat_readin.comp_num ;
fwrite(&mco_dstat_writeout,sizeof(mco_dstat_writeout),1, mco_stat_fp );
/* Copy per-file context counts and file names through unchanged. */
ctx_obj_ct_t *tmp_ctx_ct = malloc(sizeof(ctx_obj_ct_t)*co_dstat_readin.infile_num);
fread(tmp_ctx_ct,sizeof(ctx_obj_ct_t),co_dstat_readin.infile_num,co_stat_fp);
fwrite(tmp_ctx_ct,sizeof(ctx_obj_ct_t),co_dstat_readin.infile_num,mco_stat_fp);
char (*tmpname)[PATHLEN] = malloc( PATHLEN * co_dstat_readin.infile_num );
fread(tmpname,PATHLEN,co_dstat_readin.infile_num,co_stat_fp);
fwrite(tmpname,PATHLEN,co_dstat_readin.infile_num,mco_stat_fp);
free(tmp_ctx_ct);
free(tmpname);
fclose(co_stat_fp);
fclose(mco_stat_fp);
cdb_kmerf2kmerdb(dist_mco_dir,dist_co_dir,co_dstat_readin.infile_num,co_dstat_readin.comp_num,p_fit_mem);
free((char*)dist_co_dir );
free((char*)dist_rslt_dir);
free((char*)dist_mco_dir);
free((char*)mco_dstat_fpath);
}
/* Alphabet size (nucleotides). */
int alp_size = 4 ;
/* Zero block used to initialize per-bin distance files on disk. */
static ctx_obj_ct_t initial_dist[BIN_SZ];
/* Dimensions of the last comparison, set by mco_co_dist() for the printers. */
static int ref_seq_num,qry_seq_num,kmerlen,dim_reduct_len ;
/* Compute shared-context counts between every query .co file in
 * qryco_dname and every reference in the .mco database refmco_dname,
 * writing one mmap-backed <bin>.<qry>.dist accumulator file per
 * (reference-bin, query) pair and finally appending the formatted
 * distances to <distout_dir>/distance.out. References are processed in
 * bins of BIN_SZ; queries are processed in parallel with p_fit_mem
 * threads. */
void mco_co_dist( char *refmco_dname, char *qryco_dname, const char *distout_dir, int p_fit_mem)
{
fprintf(logfp,"run mco_co_dist(), %d threads used\n",p_fit_mem);
FILE *refmco_dstat_fp, *qryco_dstat_fp;
char *refmco_dstat_fpath = malloc(PATHLEN*sizeof(char));
char *qryco_dstat_fpath = malloc(PATHLEN*sizeof(char));
sprintf(refmco_dstat_fpath,"%s/%s",refmco_dname,mco_dstat);
sprintf(qryco_dstat_fpath,"%s/%s",qryco_dname,co_dstat);
if( (refmco_dstat_fp = fopen(refmco_dstat_fpath,"rb")) == NULL )
err(errno,"need provied mco dir path for mco_co_dist() arg 1. refmco_dstat_fpath");
if( (qryco_dstat_fp = fopen(qryco_dstat_fpath,"rb")) == NULL )
err(errno,"need provied co dir path for mco_co_dist() arg 2. qryco_dstat_fpath");
mco_dstat_t mco_dstat_readin ;
co_dstat_t co_dstat_readin ;
fread(&mco_dstat_readin,sizeof(mco_dstat_readin),1,refmco_dstat_fp);
fread(&co_dstat_readin,sizeof(co_dstat_readin),1,qryco_dstat_fp);
/* Per-file context counts and names, used for the final output lines. */
unsigned int * qry_ctx_ct_list = malloc(co_dstat_readin.infile_num * sizeof(unsigned int));
unsigned int * ref_ctx_ct_list = malloc(mco_dstat_readin.infile_num * sizeof(unsigned int));
fread(qry_ctx_ct_list,sizeof(unsigned int),co_dstat_readin.infile_num,qryco_dstat_fp);
fread(ref_ctx_ct_list,sizeof(unsigned int),mco_dstat_readin.infile_num,refmco_dstat_fp);
char (*cofname)[PATHLEN] = malloc(co_dstat_readin.infile_num * PATHLEN);
char (*mcofname)[PATHLEN] = malloc(mco_dstat_readin.infile_num * PATHLEN);
fread(cofname,PATHLEN,co_dstat_readin.infile_num,qryco_dstat_fp);
fread(mcofname,PATHLEN,mco_dstat_readin.infile_num,refmco_dstat_fp);
fclose(refmco_dstat_fp);
fclose(qryco_dstat_fp);
/* Query and reference must have been sketched with identical settings. */
if( !(mco_dstat_readin.comp_num == co_dstat_readin.comp_num) )
err(errno,"query args not match ref args: ref.comp_num = %d vs. %d = qry.comp_num",
mco_dstat_readin.comp_num, co_dstat_readin.comp_num);
if(!(mco_dstat_readin.shuf_id == co_dstat_readin.shuf_id))
err(errno,"query args not match ref args: ref.shuf_id = %d vs. %d = qry.shuf_id",
mco_dstat_readin.shuf_id, co_dstat_readin.shuf_id);
int ref_bin_num = mco_dstat_readin.infile_num / BIN_SZ;
int binsz;
for(int i=0; i<=ref_bin_num;i++ ){
/* Last bin may be partial; skip it entirely when it is empty. */
if( i == ref_bin_num ){
binsz = mco_dstat_readin.infile_num % BIN_SZ;
if(binsz == 0) continue;
}else binsz = BIN_SZ;
/* Initialize one zeroed accumulator file per non-empty query. */
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for( int k=0; k< co_dstat_readin.infile_num; k++){
if(qry_ctx_ct_list[k]==0){
warnx("%dth co file is empty",k);
continue;
}
char dist_fcode[PATHLEN];
sprintf(dist_fcode,"%s/%d.%d.dist", distout_dir, i, k );
FILE *distfp;
if ( (distfp = fopen(dist_fcode,"wb")) == NULL) err(errno,"mco_co_dist()::%s",dist_fcode);
fwrite(initial_dist, sizeof(ctx_obj_ct_t), binsz, distfp);
fclose(distfp);
}
fprintf(logfp,"all %d co files' distance file initialized\n",co_dstat_readin.infile_num);
/* Accumulate shared-context counts one component at a time, so only one
 * component of the reference database is resident in memory. */
for ( int j = 0; j < mco_dstat_readin.comp_num; j++ ){
char mco_fcode[PATHLEN];
sprintf(mco_fcode,"%s/%d.mco.%d",refmco_dname,i,j);
gidobj_t** unit_arrmco_readin = read_unit_arrmco_file(mco_fcode);
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int k=0; k< co_dstat_readin.infile_num; k++){
if(qry_ctx_ct_list[k]==0) continue;
int tid = 0;
#ifdef _OPENMP
tid = omp_get_thread_num();
#endif
char co_fcode[PATHLEN]; char dist_fcode[PATHLEN];
sprintf(co_fcode,"%s/%d.%d.co.%d",qryco_dname,k/BIN_SZ, k % BIN_SZ, j);
sprintf(dist_fcode,"%s/%d.%d.dist",distout_dir, i , k );
int fd;
if( ( (fd = open(dist_fcode,O_RDWR, 0600) ) == -1) )
err(errno,"mco_co_dist()::distfile = %s[tid = %d]",dist_fcode,tid);
/* The accumulator file is mmap'ed so updates persist across the
 * per-component passes without explicit read/write cycles. */
ctx_obj_ct_t * ctx_obj_ct = mmap(NULL, binsz*(sizeof(ctx_obj_ct_t)), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if(ctx_obj_ct == MAP_FAILED) err(errno,"ctx_obj_ct mmap error");
ctx_obj_ct = mco_co_mmpdist_core(unit_arrmco_readin,co_fcode,ctx_obj_ct);
if ( msync( ctx_obj_ct, binsz*(sizeof(ctx_obj_ct_t)), MS_SYNC ) < 0 )
err(errno,"mco_co_dist()::ctx_obj_ct msync failed");
munmap(ctx_obj_ct,binsz*(sizeof(ctx_obj_ct_t)));
close(fd);
}
free_unit_arrmco(unit_arrmco_readin);
}
}
/* Publish dimensions for the output formatter, then print all results. */
ref_seq_num = mco_dstat_readin.infile_num ;
qry_seq_num = co_dstat_readin.infile_num ;
kmerlen = co_dstat_readin.kmerlen;
dim_reduct_len = co_dstat_readin.dim_rd_len;
char distf[PATHLEN];
sprintf(distf, "%s/distance.out", distout_dir);
fprintf(logfp,"distance output to : %s\n",distf);
printf("distance output to : %s\n",distf);
FILE *distfp;
if( (distfp = fopen(distf,"a")) == NULL ) err(errno,"mco_co_dist():%s",distf);
for(int i=0; i<=ref_bin_num;i++ ){
for ( int k = 0; k < co_dstat_readin.infile_num; k++ ){
if(qry_ctx_ct_list[k]>0)
fname_dist_print(i,k,distout_dir,ref_ctx_ct_list,qry_ctx_ct_list,mcofname,cofname,distfp);
}
}
fclose(distfp);
free(ref_ctx_ct_list);
free(qry_ctx_ct_list);
free(mcofname);
free(cofname);
}
/*
 * mco_cbd_co_dist(): count the k-mer contexts shared by every
 * (reference, query) genome pair, where the reference is an inverted
 * "mco" index and the query is a set of combined-co ("combco") files.
 * Counts accumulate in a file-backed matrix <outdir>/sharedk_ct.dat
 * (one ctx_obj_ct_t per pair) that is mmap()ed and processed in query
 * batches sized from the -m memory limit, then summarised by
 * dist_print_nobin().
 *
 * Fixes vs. previous revision:
 *  - removed munmap() calls on the malloc()ed buffers mco_offset_index
 *    and mco_bin_index (munmap on non-mapped memory is invalid); they
 *    are reused across components and free()d once after the batch loop;
 *  - the shared-count file is created atomically with O_CREAT|O_EXCL
 *    instead of the probe/close(-1)/re-open sequence;
 *  - freed refmco_dstat_fpath, qryco_dstat_fpath and mco_mem (leaked);
 *  - dropped a dead empty `if(co_dstat_readin.koc){}` branch and the
 *    unused distf buffer.
 */
void mco_cbd_co_dist(dist_opt_val_t *opt_val_in)
{
    int p_fit_mem = opt_val_in->p;
    llong mem_limit = (llong)opt_val_in->mmry*BBILLION;
    char *refmco_dname = opt_val_in->refpath;
    char *qryco_dname = opt_val_in->remaining_args[0];
    const char *distout_dir = opt_val_in->outdir;
    fprintf(logfp,"run mco_cbd_co_dist(), %d threads used\n",p_fit_mem);
    printf("run mco_cbd_co_dist(), %fG memory used\t%d threads used\n",opt_val_in->mmry,p_fit_mem);
    /* read both stat headers: sketch parameters, per-genome context counts
     * and the genome file-name tables */
    FILE *refmco_dstat_fp, *qryco_dstat_fp;
    char *refmco_dstat_fpath = malloc(PATHLEN*sizeof(char));
    char *qryco_dstat_fpath = malloc(PATHLEN*sizeof(char));
    sprintf(refmco_dstat_fpath,"%s/%s",refmco_dname,mco_dstat);
    sprintf(qryco_dstat_fpath,"%s/%s",qryco_dname,co_dstat);
    if( (refmco_dstat_fp = fopen(refmco_dstat_fpath,"rb")) == NULL )
        err(errno,"need provied mco dir path for mco_co_dist() arg 1. refmco_dstat_fpath");
    if( (qryco_dstat_fp = fopen(qryco_dstat_fpath,"rb")) == NULL )
        err(errno,"need provied co dir path for mco_co_dist() arg 2. qryco_dstat_fpath");
    mco_dstat_t mco_dstat_readin ;
    co_dstat_t co_dstat_readin ;
    fread(&mco_dstat_readin,sizeof(mco_dstat_readin),1,refmco_dstat_fp);
    fread(&co_dstat_readin,sizeof(co_dstat_readin),1,qryco_dstat_fp);
    ctx_obj_ct_t * qry_ctx_ct_list = malloc(co_dstat_readin.infile_num * sizeof(ctx_obj_ct_t));
    ctx_obj_ct_t * ref_ctx_ct_list = malloc(mco_dstat_readin.infile_num * sizeof(ctx_obj_ct_t));
    fread(qry_ctx_ct_list,sizeof(ctx_obj_ct_t),co_dstat_readin.infile_num,qryco_dstat_fp);
    fread(ref_ctx_ct_list,sizeof(ctx_obj_ct_t),mco_dstat_readin.infile_num,refmco_dstat_fp);
    char (*cofname)[PATHLEN] = malloc(co_dstat_readin.infile_num * PATHLEN);
    char (*mcofname)[PATHLEN] = malloc(mco_dstat_readin.infile_num * PATHLEN);
    fread(cofname,PATHLEN,co_dstat_readin.infile_num,qryco_dstat_fp);
    fread(mcofname,PATHLEN,mco_dstat_readin.infile_num,refmco_dstat_fp);
    fclose(refmco_dstat_fp);
    fclose(qryco_dstat_fp);
    free(refmco_dstat_fpath);
    free(qryco_dstat_fpath);
    /* ref and qry sketches must have been built with identical parameters */
    if( !(mco_dstat_readin.comp_num == co_dstat_readin.comp_num) )
        err(errno,"query args not match ref args: ref.comp_num = %d vs. %d = qry.comp_num",
            mco_dstat_readin.comp_num, co_dstat_readin.comp_num);
    if(!(mco_dstat_readin.shuf_id == co_dstat_readin.shuf_id))
        err(errno,"query args not match ref args: ref.shuf_id = %d vs. %d = qry.shuf_id",
            mco_dstat_readin.shuf_id, co_dstat_readin.shuf_id);
    int ref_bin_num = mco_dstat_readin.infile_num / BIN_SZ;
    if( mco_dstat_readin.infile_num % BIN_SZ > 0 ) ref_bin_num +=1;
    /* create the shared-count matrix file atomically; an existing file is
     * refused (EEXIST), same policy as before but without the TOCTOU race */
    char onedist[PATHLEN];
    sprintf(onedist,"%s/sharedk_ct.dat",distout_dir);
    int dist_bfp = open(onedist, O_RDWR|O_CREAT|O_EXCL, 0600);
    if (dist_bfp == -1) err(errno,"mco_cbd_co_dist()::%s",onedist);
    size_t disf_sz = (size_t)mco_dstat_readin.infile_num*co_dstat_readin.infile_num*sizeof(ctx_obj_ct_t) ;
    if(ftruncate(dist_bfp, disf_sz) == -1) err(errno,"mco_cbd_co_dist()::ftruncate");
    ctx_obj_ct_t *ctx_obj_ct = mmap(NULL,disf_sz,PROT_READ | PROT_WRITE,MAP_SHARED ,dist_bfp,0);
    if(ctx_obj_ct == MAP_FAILED) err(errno,"ctx_obj_ct mmap error");
    close(dist_bfp); /* mapping stays valid after close */
    int page_sz = sysconf(_SC_PAGESIZE);
    int comp_sz = (1 << 4*COMPONENT_SZ);
    /* batch boundaries must be page-aligned so partial munmap() is legal */
    if( comp_sz % page_sz != 0 ) err(errno,"comp_sz %d is not multiple of page_sz %d ",comp_sz,page_sz );
    int num_unit_mem = mem_limit / (mco_dstat_readin.infile_num*sizeof(ctx_obj_ct_t) * page_sz);
    if(num_unit_mem < 1) err(errno,"at least %fG memory needed to map ./onedist, specify more memory use -m",
        (float)mco_dstat_readin.infile_num*sizeof(ctx_obj_ct_t) * page_sz/1073741824 );
    int num_cof_batch = num_unit_mem*page_sz; /* queries per mapped batch */
    size_t unitsz_distf_mapped = (size_t)num_cof_batch * mco_dstat_readin.infile_num * sizeof(ctx_obj_ct_t) ;
    int num_mapping_distf = co_dstat_readin.infile_num / num_cof_batch ;
    size_t maplength;
    int bnum_infile;
    FILE *cbd_fcode_comp_fp,*cbd_fcode_comp_index_fp;
    struct stat cbd_fcode_stat;
    size_t *fco_pos = malloc(sizeof(size_t) * (co_dstat_readin.infile_num + 1) );
    size_t *mco_offset_index = malloc(sizeof(size_t) * comp_sz);
    unsigned int *mco_bin_index = malloc( sizeof(unsigned int) * comp_sz * ref_bin_num );
    /* NOTE(review): hard-coded capacity assumed >= largest mco.<j> — confirm */
    gidobj_t* mco_mem = malloc( sizeof(gidobj_t) * 442317172 );
    char mco_fcode[PATHLEN]; char mco_index_fcode[PATHLEN];
    char co_cbd_fcode[PATHLEN];char co_cbd_index_fcode[PATHLEN];
    for(int b=0;b<=num_mapping_distf;b++){
        if(b==num_mapping_distf){
            bnum_infile = co_dstat_readin.infile_num % num_cof_batch ;
            if( bnum_infile == 0 ) continue; /* query count divides evenly */
        }else bnum_infile = num_cof_batch;
        maplength = (size_t)bnum_infile * mco_dstat_readin.infile_num * sizeof(ctx_obj_ct_t);
        printf("disf_sz=%lu\trefnum=%d\tqrynum=%d\tnum_mapping_distf=%dbnum_infile=%d\t\t%lu\t%lu\tflag1:Ok\n",
            disf_sz,mco_dstat_readin.infile_num,co_dstat_readin.infile_num,num_mapping_distf,bnum_infile,maplength,(size_t)b*unitsz_distf_mapped);
        for ( int j = 0; j < mco_dstat_readin.comp_num; j++ ) {
            /* load component j of the reference inverted index */
            sprintf(mco_index_fcode,"%s/mco.index.%d",refmco_dname,j);
            sprintf(mco_fcode,"%s/mco.%d",refmco_dname,j);
            FILE *indexfp, *mcofp;
            if( (indexfp = fopen(mco_index_fcode,"rb"))==NULL) err(errno,"mco_cbd_co_dist()::%s",mco_index_fcode);
            fread(mco_offset_index,sizeof(size_t),comp_sz,indexfp);
            fread(mco_bin_index,sizeof(unsigned int),comp_sz * ref_bin_num,indexfp);
            fclose(indexfp);
            struct stat s;
            if( (mcofp = fopen(mco_fcode,"rb"))==NULL) err(errno,"mco_cbd_co_dist()::%s",mco_fcode);
            stat(mco_fcode, &s);
            fread(mco_mem,sizeof(gidobj_t),s.st_size/sizeof(gidobj_t),mcofp);
            fclose(mcofp);
            /* load component j of the query combined-co data and its
             * per-query offset index fco_pos */
            sprintf(co_cbd_fcode,"%s/combco.%d",qryco_dname,j);
            if( (cbd_fcode_comp_fp = fopen(co_cbd_fcode,"rb"))==NULL) err(errno,"mco_cbd_co_dist()::%s",co_cbd_fcode);
            stat(co_cbd_fcode, &cbd_fcode_stat);
            unsigned int *cbd_fcode_mem = malloc(cbd_fcode_stat.st_size);
            fread(cbd_fcode_mem,sizeof(unsigned int),cbd_fcode_stat.st_size/sizeof(unsigned int),cbd_fcode_comp_fp);
            fclose(cbd_fcode_comp_fp);
            sprintf(co_cbd_index_fcode,"%s/combco.index.%d",qryco_dname,j);
            if( (cbd_fcode_comp_index_fp = fopen(co_cbd_index_fcode,"rb"))==NULL)
                err(errno,"mco_cbd_co_dist()::%s",co_cbd_index_fcode);
            fread(fco_pos,sizeof(size_t),co_dstat_readin.infile_num + 1 ,cbd_fcode_comp_index_fp);
            fclose(cbd_fcode_comp_index_fp);
            /* each thread owns one query row of the matrix — no write races */
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
            for(int kind = 0; kind < bnum_infile; kind++){
                int k = b*num_cof_batch + kind;
                if(qry_ctx_ct_list[k]==0) continue; /* empty query sketch */
                unsigned int ind, mcogid, pos;
                llong distf_offset = (size_t)k * mco_dstat_readin.infile_num;
                for(int n = 0; n < fco_pos[k+1] - fco_pos[k]; n++){
                    ind = cbd_fcode_mem[ fco_pos[k] + n ];
                    pos = 0;
                    for(int bin=0; bin < ref_bin_num; bin++){
                        int bin_gnum = mco_bin_index[ ind *ref_bin_num + bin];
                        for(int g = 0; g < bin_gnum ; g++ ){
                            mcogid = bin*BIN_SZ + mco_mem[ mco_offset_index[ind] + pos ];
                            ctx_obj_ct[distf_offset + mcogid ]++ ;
                            pos++;
                        }
                    }
                }
            }
            free(cbd_fcode_mem);
            /* bug fix: mco_offset_index / mco_bin_index are malloc()ed, not
             * mmap()ed — the old munmap() calls here were invalid; the
             * buffers are reused next component and free()d below. */
        }
        if ( msync( ctx_obj_ct + (size_t)b*num_cof_batch*mco_dstat_readin.infile_num, maplength, MS_SYNC ) < 0 )
            err(errno,"mco_cbd_co_dist()::ctx_obj_ct msync failed");
        munmap(ctx_obj_ct + (size_t)b*num_cof_batch*mco_dstat_readin.infile_num, maplength);
    }
    free(fco_pos);
    free(mco_offset_index);
    free(mco_bin_index);
    free(mco_mem);
    /* globals consumed by dist_print_nobin() */
    ref_seq_num = mco_dstat_readin.infile_num ;
    qry_seq_num = co_dstat_readin.infile_num ;
    kmerlen = co_dstat_readin.kmerlen;
    dim_reduct_len = co_dstat_readin.dim_rd_len;
    dist_print_nobin(distout_dir,ref_seq_num, qry_seq_num, ref_ctx_ct_list, qry_ctx_ct_list,num_cof_batch,mcofname, cofname, opt_val_in);
    free(ref_ctx_ct_list);
    free(qry_ctx_ct_list);
    free(mcofname);
    free(cofname);
}
/* Accumulator for one (reference, query) genome pair in k-mer-occurrence
 * (koc) mode:
 *   shared_koc_ct - sum of query k-mer occurrence counts over the pair's
 *                   shared contexts
 *   shared_k_ct   - number of shared contexts (same unit as plain mode) */
typedef struct koc_dist {
llong shared_koc_ct;
ctx_obj_ct_t shared_k_ct;
} koc_dist_t;
/*
 * mco_cbd_koc_compatible_dist(): compute shared k-mer statistics between a
 * reference mco inverted index and query "combco" files, handling both
 * plain queries and queries carrying per-k-mer occurrence counts
 * (co_dstat_readin.koc).  Counts accumulate in the file-backed matrix
 * <outdir>/sharedk_ct.dat, mmap()ed and processed in query batches sized
 * from the -m memory limit, then formatted by koc_dist_print_nobin() /
 * dist_print_nobin().
 */
void mco_cbd_koc_compatible_dist(dist_opt_val_t *opt_val_in)
{
int p_fit_mem = opt_val_in->p;
llong mem_limit = (llong)opt_val_in->mmry*BBILLION;
char *refmco_dname = opt_val_in->refpath;
char *qryco_dname = opt_val_in->remaining_args[0];
const char *distout_dir = opt_val_in->outdir;
fprintf(logfp,"mco_cbd_compatible_dist(): %d threads used\n",p_fit_mem);
/* read both stat headers: sketch parameters, per-genome context counts
 * and the genome file-name tables */
FILE *refmco_dstat_fp, *qryco_dstat_fp;
char *refmco_dstat_fpath = malloc(PATHLEN*sizeof(char));
char *qryco_dstat_fpath = malloc(PATHLEN*sizeof(char));
sprintf(refmco_dstat_fpath,"%s/%s",refmco_dname,mco_dstat);
sprintf(qryco_dstat_fpath,"%s/%s",qryco_dname,co_dstat);
if( (refmco_dstat_fp = fopen(refmco_dstat_fpath,"rb")) == NULL )
err(errno,"need provied mco dir path for mco_co_dist() arg 1. refmco_dstat_fpath");
if( (qryco_dstat_fp = fopen(qryco_dstat_fpath,"rb")) == NULL )
err(errno,"need provied co dir path for mco_co_dist() arg 2. qryco_dstat_fpath");
mco_dstat_t mco_dstat_readin ;
co_dstat_t co_dstat_readin ;
fread(&mco_dstat_readin,sizeof(mco_dstat_readin),1,refmco_dstat_fp);
fread(&co_dstat_readin,sizeof(co_dstat_readin),1,qryco_dstat_fp);
ctx_obj_ct_t * qry_ctx_ct_list = malloc(co_dstat_readin.infile_num * sizeof(ctx_obj_ct_t));
ctx_obj_ct_t * ref_ctx_ct_list = malloc(mco_dstat_readin.infile_num * sizeof(ctx_obj_ct_t));
fread(qry_ctx_ct_list,sizeof(ctx_obj_ct_t),co_dstat_readin.infile_num,qryco_dstat_fp);
fread(ref_ctx_ct_list,sizeof(ctx_obj_ct_t),mco_dstat_readin.infile_num,refmco_dstat_fp);
char (*cofname)[PATHLEN] = malloc(co_dstat_readin.infile_num * PATHLEN);
char (*mcofname)[PATHLEN] = malloc(mco_dstat_readin.infile_num * PATHLEN);
fread(cofname,PATHLEN,co_dstat_readin.infile_num,qryco_dstat_fp);
fread(mcofname,PATHLEN,mco_dstat_readin.infile_num,refmco_dstat_fp);
fclose(refmco_dstat_fp);
fclose(qryco_dstat_fp);
/* ref and qry sketches must have been built with identical parameters */
if( !(mco_dstat_readin.comp_num == co_dstat_readin.comp_num) )
err(errno,"query args not match ref args: ref.comp_num = %d vs. %d = qry.comp_num",
mco_dstat_readin.comp_num, co_dstat_readin.comp_num);
if(!(mco_dstat_readin.shuf_id == co_dstat_readin.shuf_id))
err(errno,"query args not match ref args: ref.shuf_id = %d vs. %d = qry.shuf_id",
mco_dstat_readin.shuf_id, co_dstat_readin.shuf_id);
int ref_bin_num = mco_dstat_readin.infile_num / BIN_SZ;
if( mco_dstat_readin.infile_num % BIN_SZ > 0 ) ref_bin_num +=1;
/* create sharedk_ct.dat, refusing to reuse an existing file.
 * NOTE(review): close(-1) below is a no-op on an invalid fd, and the
 * probe-then-create sequence is not atomic — O_CREAT|O_EXCL would be
 * the robust form; confirm before changing. */
char onedist[PATHLEN];
sprintf(onedist,"%s/sharedk_ct.dat",distout_dir);
int dist_bfp = open(onedist,O_RDWR,0600) ;
if (dist_bfp == -1) {
close(dist_bfp);
dist_bfp = open(onedist,O_RDWR|O_CREAT, 0600) ;
if (dist_bfp == -1) err(errno,"mco_cbd_co_dist()::%s",onedist);
}else{
errno = EEXIST;
err(errno,"mco_cbd_co_dist()::%s",onedist);
}
int page_sz = sysconf(_SC_PAGESIZE);
int comp_sz = (1 << 4*COMPONENT_SZ);
/* batch boundaries must be page-aligned so partial munmap() is legal */
if( comp_sz % page_sz != 0 ) err(errno,"comp_sz %d is not multiple of page_sz %d ",comp_sz,page_sz );
size_t maplength;
int bnum_infile;
FILE *cbd_fcode_comp_fp,*cbd_fcode_comp_index_fp;
struct stat cbd_fcode_stat;
size_t *fco_pos = malloc(sizeof(size_t) * (co_dstat_readin.infile_num + 1) );
size_t *mco_offset_index = malloc(sizeof(size_t) * comp_sz);
unsigned int *mco_bin_index = malloc( sizeof(unsigned int) * comp_sz * ref_bin_num );
/* NOTE(review): hard-coded capacity assumed >= largest mco.<j> — confirm */
gidobj_t* mco_mem = malloc( sizeof(gidobj_t) * 442317172 );
char mco_fcode[PATHLEN]; char mco_index_fcode[PATHLEN];
char co_cbd_fcode[PATHLEN];char co_cbd_index_fcode[PATHLEN];
/* globals consumed by the *_dist_print_nobin() printers */
ref_seq_num = mco_dstat_readin.infile_num ;
qry_seq_num = co_dstat_readin.infile_num ;
kmerlen = co_dstat_readin.kmerlen;
dim_reduct_len = co_dstat_readin.dim_rd_len;
char distf[PATHLEN];
sprintf(distf, "%s/distance.out", distout_dir);
/* koc mode: each 64-bit combco entry packs the context id in the high
 * bits (>> OCCRC_BIT) and the occurrence count in the low bits
 * (& OCCRC_MAX); both a shared-context count and an occurrence sum are
 * accumulated per (qry,ref) pair. */
if(co_dstat_readin.koc){
size_t disf_sz = (size_t)mco_dstat_readin.infile_num*co_dstat_readin.infile_num*sizeof(koc_dist_t) ;
if(ftruncate(dist_bfp, disf_sz) == -1) err(errno,"mco_cbd_koc_dist()::ftruncate");
close(dist_bfp);
dist_bfp = open(onedist,O_RDWR, 0600);
if (dist_bfp == -1) err(errno,"mco_cbd_koc_dist()::%s",onedist);
koc_dist_t *ctx_obj_ct = mmap(NULL,disf_sz,PROT_READ | PROT_WRITE,MAP_SHARED,dist_bfp,0);
if(ctx_obj_ct == MAP_FAILED) err(errno,"ctx_obj_ct mmap error");
close(dist_bfp);
/* size query batches so one batch row-block fits in the -m limit */
int num_unit_mem = mem_limit / (mco_dstat_readin.infile_num*sizeof(koc_dist_t) * page_sz);
if(num_unit_mem < 1) err(errno,"at least %fG memory needed to map ./onedist, specify more memory use -m",
(float)mco_dstat_readin.infile_num*sizeof(koc_dist_t) * page_sz/1073741824 );
int num_cof_batch = num_unit_mem*page_sz;
size_t unitsz_distf_mapped = (size_t)num_cof_batch * mco_dstat_readin.infile_num * sizeof(koc_dist_t) ;
int num_mapping_distf = co_dstat_readin.infile_num / num_cof_batch ;
for(int b=0;b<=num_mapping_distf;b++){
if(b==num_mapping_distf){
bnum_infile = co_dstat_readin.infile_num % num_cof_batch ;
if( bnum_infile == 0 ) continue;
}else bnum_infile = num_cof_batch;
maplength = (size_t)bnum_infile * mco_dstat_readin.infile_num * sizeof(koc_dist_t);
printf("disf_sz=%lu\trefnum=%d\tqrynum=%d\tnum_mapping_distf=%dbnum_infile=%d\t\t%lu\t%lu\tflag1:Ok\n",
disf_sz,mco_dstat_readin.infile_num,co_dstat_readin.infile_num,num_mapping_distf,bnum_infile,maplength,(size_t)b*unitsz_distf_mapped);
for ( int j = 0; j < mco_dstat_readin.comp_num; j++ ) {
/* load component j of the reference inverted index */
sprintf(mco_index_fcode,"%s/mco.index.%d",refmco_dname,j);
sprintf(mco_fcode,"%s/mco.%d",refmco_dname,j);
FILE *indexfp, *mcofp;
if( (indexfp = fopen(mco_index_fcode,"rb"))==NULL) err(errno,"mco_cbd_koc_dist()::%s",mco_index_fcode);
fread(mco_offset_index,sizeof(size_t),comp_sz,indexfp);
fread(mco_bin_index,sizeof(unsigned int),comp_sz * ref_bin_num,indexfp);
fclose(indexfp);
struct stat s;
if( (mcofp = fopen(mco_fcode,"rb"))==NULL) err(errno,"mco_cbd_koc_dist()::%s",mco_fcode);
stat(mco_fcode, &s);
fread(mco_mem,sizeof(gidobj_t),s.st_size/sizeof(gidobj_t),mcofp);
fclose(mcofp);
/* load component j of the query combco (64-bit koc entries) and
 * its per-query offset index fco_pos */
sprintf(co_cbd_fcode,"%s/combco.%d",qryco_dname,j);
if( (cbd_fcode_comp_fp = fopen(co_cbd_fcode,"rb"))==NULL) err(errno,"mco_cbd_koc_dist()::%s",co_cbd_fcode);
stat(co_cbd_fcode, &cbd_fcode_stat);
llong *cbd_fcode_mem = malloc(cbd_fcode_stat.st_size);
fread(cbd_fcode_mem,sizeof(llong),cbd_fcode_stat.st_size/sizeof(llong),cbd_fcode_comp_fp);
fclose(cbd_fcode_comp_fp);
sprintf(co_cbd_index_fcode,"%s/combco.index.%d",qryco_dname,j);
if( (cbd_fcode_comp_index_fp = fopen(co_cbd_index_fcode,"rb"))==NULL)
err(errno,"mco_cbd_co_dist()::%s",co_cbd_index_fcode);
fread(fco_pos,sizeof(size_t),co_dstat_readin.infile_num + 1, cbd_fcode_comp_index_fp);
fclose(cbd_fcode_comp_index_fp);
/* each thread owns one query row of the matrix — no write races */
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int kind = 0; kind < bnum_infile; kind++){
int k = b*num_cof_batch + kind;
if(qry_ctx_ct_list[k]==0) continue;
unsigned int ind, mcogid, pos;
size_t distf_offset = (size_t)k * mco_dstat_readin.infile_num;
for(int n = 0; n < fco_pos[k+1] - fco_pos[k]; n++){
ind = (unsigned int)(cbd_fcode_mem[ fco_pos[k] + n ] >> OCCRC_BIT) ;
pos = 0;
for(int bin=0; bin < ref_bin_num; bin++){
int bin_gnum = mco_bin_index[ ind *ref_bin_num + bin ];
for(int g = 0; g < bin_gnum ; g++ ){
mcogid = bin*BIN_SZ + mco_mem[ mco_offset_index[ind] + pos ];
ctx_obj_ct[distf_offset + mcogid].shared_k_ct++ ;
ctx_obj_ct[distf_offset + mcogid].shared_koc_ct += ( cbd_fcode_mem[ fco_pos[k] + n ] & OCCRC_MAX ) ;
pos++;
}
}
}
}
free(cbd_fcode_mem);
}
if ( msync( ctx_obj_ct + (size_t)b*num_cof_batch*mco_dstat_readin.infile_num, maplength, MS_SYNC ) < 0 )
err(errno,"mco_cbd_co_dist()::ctx_obj_ct msync failed");
munmap(ctx_obj_ct + (size_t)b*num_cof_batch*mco_dstat_readin.infile_num, maplength);
}
free(fco_pos);
koc_dist_print_nobin(distout_dir,ref_seq_num, qry_seq_num, ref_ctx_ct_list, qry_ctx_ct_list,num_cof_batch,mcofname, cofname);
}
else{
/* plain (no-occurrence) mode: 32-bit combco entries, ctx_obj_ct_t cells */
int num_unit_mem = mem_limit / (mco_dstat_readin.infile_num*sizeof(ctx_obj_ct_t) * page_sz);
if(num_unit_mem < 1) err(errno,"at least %fG memory needed to map ./onedist, specify more memory use -m",
(float)mco_dstat_readin.infile_num*sizeof(ctx_obj_ct_t) * page_sz/1073741824 );
int num_cof_batch = num_unit_mem*page_sz;
/* precomputed shared-k file supplied: just print and bail out.
 * NOTE(review): this return leaks the buffers allocated above and
 * leaves the freshly created sharedk_ct.dat behind — confirm intended. */
if( opt_val_in->shared_kmerpath[0] != '\0'){
dist_print_nobin(distout_dir,ref_seq_num, qry_seq_num, ref_ctx_ct_list, qry_ctx_ct_list,num_cof_batch,mcofname, cofname,opt_val_in);
return;
}
size_t unitsz_distf_mapped = (size_t)num_cof_batch * mco_dstat_readin.infile_num * sizeof(ctx_obj_ct_t) ;
int num_mapping_distf = co_dstat_readin.infile_num / num_cof_batch ;
size_t disf_sz = (size_t)mco_dstat_readin.infile_num*co_dstat_readin.infile_num*sizeof(ctx_obj_ct_t) ;
if(ftruncate(dist_bfp, disf_sz) == -1) err(errno,"mco_cbd_co_dist()::ftruncate");
close(dist_bfp);
dist_bfp = open(onedist,O_RDWR, 0600);
if (dist_bfp == -1) err(errno,"mco_cbd_koc_dist()::%s",onedist);
ctx_obj_ct_t *ctx_obj_ct = mmap(NULL,disf_sz,PROT_READ | PROT_WRITE,MAP_SHARED,dist_bfp,0);
if(ctx_obj_ct == MAP_FAILED) err(errno,"ctx_obj_ct mmap error");
close(dist_bfp);
for(int b=0;b<=num_mapping_distf;b++){
if(b==num_mapping_distf){
bnum_infile = co_dstat_readin.infile_num % num_cof_batch ;
if( bnum_infile == 0 ) continue;
}else bnum_infile = num_cof_batch;
maplength = (size_t)bnum_infile * mco_dstat_readin.infile_num * sizeof(ctx_obj_ct_t);
printf("disf_sz=%lu\trefnum=%d\tqrynum=%d\tnum_mapping_distf=%dbnum_infile=%d\t\t%lu\t%lu\tflag1:Ok\n",
disf_sz,mco_dstat_readin.infile_num,co_dstat_readin.infile_num,num_mapping_distf,bnum_infile,maplength,(size_t)b*unitsz_distf_mapped);
for ( int j = 0; j < mco_dstat_readin.comp_num; j++ ) {
sprintf(mco_index_fcode,"%s/mco.index.%d",refmco_dname,j);
sprintf(mco_fcode,"%s/mco.%d",refmco_dname,j);
FILE *indexfp, *mcofp;
if( (indexfp = fopen(mco_index_fcode,"rb"))==NULL) err(errno,"mco_cbd_koc_dist()::%s",mco_index_fcode);
fread(mco_offset_index,sizeof(size_t),comp_sz,indexfp);
fread(mco_bin_index,sizeof(unsigned int),comp_sz * ref_bin_num,indexfp);
fclose(indexfp);
struct stat s;
if( (mcofp = fopen(mco_fcode,"rb"))==NULL) err(errno,"mco_cbd_koc_dist()::%s",mco_fcode);
stat(mco_fcode, &s);
fread(mco_mem,sizeof(gidobj_t),s.st_size/sizeof(gidobj_t),mcofp);
fclose(mcofp);
sprintf(co_cbd_fcode,"%s/combco.%d",qryco_dname,j);
if( (cbd_fcode_comp_fp = fopen(co_cbd_fcode,"rb"))==NULL) err(errno,"mco_cbd_koc_dist()::%s",co_cbd_fcode);
stat(co_cbd_fcode, &cbd_fcode_stat);
unsigned int *cbd_fcode_mem = malloc(cbd_fcode_stat.st_size);
fread(cbd_fcode_mem,sizeof(unsigned int),cbd_fcode_stat.st_size/sizeof(unsigned int),cbd_fcode_comp_fp);
fclose(cbd_fcode_comp_fp);
sprintf(co_cbd_index_fcode,"%s/combco.index.%d",qryco_dname,j);
if( (cbd_fcode_comp_index_fp = fopen(co_cbd_index_fcode,"rb"))==NULL)
err(errno,"mco_cbd_co_dist()::%s",co_cbd_index_fcode);
fread(fco_pos,sizeof(size_t),co_dstat_readin.infile_num + 1, cbd_fcode_comp_index_fp);
fclose(cbd_fcode_comp_index_fp);
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int kind = 0; kind < bnum_infile; kind++){
int k = b*num_cof_batch + kind;
if(qry_ctx_ct_list[k]==0) continue;
unsigned int ind, mcogid, pos;
size_t distf_offset = (size_t)k * mco_dstat_readin.infile_num;
for(int n = 0; n < fco_pos[k+1] - fco_pos[k]; n++){
ind = cbd_fcode_mem[ fco_pos[k] + n ];
pos = 0;
for(int bin=0; bin < ref_bin_num; bin++){
int bin_gnum = mco_bin_index[ ind *ref_bin_num + bin ];
for(int g = 0; g < bin_gnum ; g++ ){
mcogid = bin*BIN_SZ + mco_mem[ mco_offset_index[ind] + pos ];
ctx_obj_ct[distf_offset + mcogid]++ ;
pos++;
}
}
}
}
free(cbd_fcode_mem);
}
if ( msync( ctx_obj_ct + (size_t)b*num_cof_batch*mco_dstat_readin.infile_num, maplength, MS_SYNC ) < 0 )
err(errno,"mco_cbd_co_dist()::ctx_obj_ct msync failed");
munmap(ctx_obj_ct + (size_t)b*num_cof_batch*mco_dstat_readin.infile_num, maplength);
}
free(fco_pos);
fprintf(logfp,"distance output to : %s\n",distf);
printf("distance output to : %s\n",distf);
dist_print_nobin(distout_dir,ref_seq_num, qry_seq_num, ref_ctx_ct_list, qry_ctx_ct_list,num_cof_batch,mcofname, cofname,opt_val_in);
}
/* NOTE(review): mco_mem, refmco_dstat_fpath and qryco_dstat_fpath are
 * never freed (leak until process exit). */
free(mco_offset_index);
free(mco_bin_index);
free(ref_ctx_ct_list);
free(qry_ctx_ct_list);
free(mcofname);
free(cofname);
}
/* Accumulate, into ctx_obj_ct_in, how many contexts of the co file
 * co_fcode_in are shared with each reference genome listed in the
 * inverted index unit_arrmco (entry [0] of each list is its length,
 * genome ids follow).  Returns ctx_obj_ct_in for caller convenience. */
static inline ctx_obj_ct_t * mco_co_mmpdist_core(gidobj_t** unit_arrmco, char *co_fcode_in, ctx_obj_ct_t * ctx_obj_ct_in )
{
    mmp_uint_t comap = mmp_uint_arr(co_fcode_in); /* mmap the query co file */
    int n_ctx = comap.fsize / sizeof(unsigned int);
    for (int i = 0; i < n_ctx; i++) {
        unsigned int ctx = comap.mmpco[i];
        gidobj_t *glist = unit_arrmco[ctx];
        if (glist == NULL) continue; /* context absent from the reference */
        for (unsigned int g = 1; g <= (unsigned int)glist[0]; g++)
            ctx_obj_ct_in[ glist[g] ]++;
    }
    munmap(comap.mmpco, comap.fsize);
    return ctx_obj_ct_in;
}
/* Per-bin variant of mco_co_mmpdist_core(): zero shared_ctx_num_in, then
 * accumulate shared-context counts for one bin of bin_sz reference
 * genomes from the co file co_fcode_in, and print the file name.
 * NOTE(review): memset() clears only bin_sz BYTES; if mco_co_dist_t
 * points at elements wider than one byte this under-clears the buffer —
 * confirm the typedef before relying on this function.
 * Fix vs. previous revision: removed a leftover empty per-bin loop that
 * did nothing. */
static inline void mco_co_dist_core(gidobj_t** unit_arrmco, char *co_fcode_in, int bin_sz,
mco_co_dist_t shared_ctx_num_in )
{
    mmp_uint_t mmpcofile;
    mmpcofile = mmp_uint_arr(co_fcode_in); /* mmap the query co file */
    memset(shared_ctx_num_in,0, bin_sz);
    unsigned int ind,mcogid;
    int ctx_num = mmpcofile.fsize/sizeof(unsigned int);
    for(int n = 0; n < ctx_num; n++){
        ind = mmpcofile.mmpco[n] ;
        if(unit_arrmco[ind] != NULL){
            /* unit_arrmco[ind][0] holds the genome count; ids follow */
            for(int k = 1; k< unit_arrmco[ind][0] + 1; k++ ){
                mcogid = unit_arrmco[ind][k];
                shared_ctx_num_in[mcogid]++ ;
            }
        }
    }
    munmap(mmpcofile.mmpco, mmpcofile.fsize);
    printf("%s:",co_fcode_in);
    printf("\n");
}
/* Map the distance file distf read-only, write a one-line marker
 * ("output <path>") to dist_fp, then release the mapping.  The mapped
 * counts themselves are not formatted here. */
void dist_print( const char *distf, FILE *dist_fp )
{
    struct stat meta;
    int fdes = open (distf, O_RDONLY);
    check (fdes < 0, "open %s failed: %s", distf, strerror (errno));
    fstat (fdes, & meta);
    ctx_obj_ct_t *view = mmap(0, meta.st_size , PROT_READ, MAP_PRIVATE, fdes, 0);
    check ( view == MAP_FAILED, "mmap %s failed: %s", distf, strerror (errno));
    fprintf(dist_fp,"output %s\n",distf);
    close(fdes);
    munmap(view, meta.st_size);
}
/* Scratch path buffer shared by the dist-printing routines below
 * (fname_dist_print, koc_dist_print_nobin, dist_print_nobin); file-scope
 * mutable state, so these printers are not reentrant. */
char full_distfcode[PATHLEN];
/* Print distance lines for one (reference-bin, query) pair: mmap the
 * per-pair shared-context count file <distout_dir>/<bin>.<qry>.dist
 * (one ctx_obj_ct_t per reference genome in the bin) and append one
 * tab-separated line per reference genome to dout_fp.
 * X/Y = ref/qry sketch sizes, XnY = shared-context count read from the
 * dist file; rs estimates the expected random overlap after dimension
 * reduction; the *_prim values are rs-corrected Jaccard/containment with
 * normal-approximation 95% CIs, p-values, and q-values (p scaled by the
 * ref_seq_num*qry_seq_num comparisons). */
void fname_dist_print(int ref_bin_code, int qry_fcode, const char *distout_dir, unsigned int*ref_ctx_ct_list,
unsigned int*qry_ctx_ct_list, char (*refname)[PATHLEN], char (*qryfname)[PATHLEN], FILE *dout_fp)
{
sprintf(full_distfcode,"%s/%d.%d.dist",distout_dir,ref_bin_code,qry_fcode);
ctx_obj_ct_t *ctx_obj_ct;
int fd;
struct stat s;
fd = open (full_distfcode, O_RDONLY);
check (fd < 0, "open %s failed: %s", full_distfcode, strerror (errno));
fstat (fd, & s);
ctx_obj_ct = mmap(0, s.st_size , PROT_READ, MAP_PRIVATE, fd, 0);
check ( ctx_obj_ct == MAP_FAILED, "mmap %s failed: %s", full_distfcode, strerror (errno));
double jac_ind,contain_ind,Dm,Da,P_K_in_X_XnY, P_K_in_Y_XnY,
j_prim, c_prim,
sd_j_prim, sd_c_prim,
CI95_j_prim1, CI95_j_prim2, CI95_c_prim1, CI95_c_prim2,
CI95_Dm_prim1,CI95_Dm_prim2,CI95_Da_prim1,CI95_Da_prim2;
int Min_XY_size, X_size, Y_size, XnY_size, XuY_size, X_XnY_size, Y_XnY_size ;
/* one entry in the dist file per reference genome of this bin */
for(llong i = 0;i < s.st_size/sizeof(ctx_obj_ct_t); i++) {
X_size = ref_ctx_ct_list[ref_bin_code*BIN_SZ + i];
Y_size = qry_ctx_ct_list[qry_fcode];
Min_XY_size = X_size < Y_size ? X_size : Y_size ;
XnY_size = ctx_obj_ct[i];
XuY_size = X_size + Y_size - XnY_size ;
X_XnY_size = X_size - XnY_size;
Y_XnY_size = Y_size- XnY_size;
/* NOTE(review): divisions below assume XuY_size/Min_XY_size > 0 —
 * two empty sketches would divide by zero; confirm callers filter them */
jac_ind = (double)XnY_size / XuY_size;
contain_ind = (double)XnY_size / Min_XY_size ;
/* Mash-style distances: Dm from Jaccard, Da from containment */
Dm = jac_ind ==1? 0: -log(2*jac_ind/(1+jac_ind)) / kmerlen ;
Da = contain_ind==1? 0: -log(contain_ind) / kmerlen ;
/* probability a random k-mer of X\Y (resp. Y\X) collides after the
 * dimension reduction; rs = expected random shared count */
P_K_in_X_XnY = 1 - pow( (1- 1/pow(alp_size,(kmerlen - dim_reduct_len) )), X_XnY_size );
P_K_in_Y_XnY = 1 - pow( (1- 1/pow(alp_size,(kmerlen - dim_reduct_len) )), Y_XnY_size );
double rs = P_K_in_X_XnY * P_K_in_Y_XnY * ( X_XnY_size + Y_XnY_size )
/(P_K_in_X_XnY + P_K_in_Y_XnY - 2*P_K_in_X_XnY * P_K_in_Y_XnY);
/* rs-corrected Jaccard / containment and their binomial-approx sd */
j_prim = ((double)XnY_size - rs) / XuY_size ;
c_prim = ((double)XnY_size - rs) / Min_XY_size ;
sd_j_prim = pow(j_prim*(1 - j_prim) / XuY_size, 0.5) ;
sd_c_prim = pow(c_prim*(1 - c_prim) / Min_XY_size,0.5) ;
CI95_j_prim1 = j_prim - 1.96*sd_j_prim;
CI95_j_prim2 = j_prim + 1.96*sd_j_prim;
CI95_c_prim1 = c_prim - 1.96*sd_c_prim;
CI95_c_prim2 = c_prim + 1.96*sd_c_prim;
CI95_Dm_prim1 = CI95_j_prim2 == 1? 0:-log(2*CI95_j_prim2/(1+CI95_j_prim2)) / kmerlen ;
CI95_Dm_prim2 = CI95_j_prim1 == 1? 0:-log(2*CI95_j_prim1/(1+CI95_j_prim1)) / kmerlen ;
CI95_Da_prim1 = CI95_c_prim2 == 1? 0:-log(CI95_c_prim2) / kmerlen ;
CI95_Da_prim2 = CI95_c_prim1 == 1? 0:-log(CI95_c_prim1) / kmerlen ;
/* one-sided normal-approximation p-values; NOTE(review): sd may be 0
 * (j_prim 0 or 1), giving inf/nan inside erfc — confirm acceptable */
double pv_j_prim = 0.5 * erfc( j_prim / sd_j_prim * pow(0.5,0.5) );
double pv_c_prim = 0.5 * erfc( c_prim/sd_c_prim * pow(0.5,0.5) );
double qv_j_prim = pv_j_prim * ref_seq_num*qry_seq_num ;
double qv_c_prim = pv_c_prim * ref_seq_num*qry_seq_num ;
/* NOTE(review): the %u conversions receive int arguments (XnY_size,
 * X_size, Y_size) — harmless for non-negative values but mismatched */
fprintf(dout_fp,"%s\t%s\t%u-%u|%u|%u\t%lf\t%lf\t%lf\t%lf\t[%lf,%lf]\t[%lf,%lf]\t[%lf,%lf]\t[%lf,%lf]\t%E\t%E\t%E\t%E\n", qryfname[qry_fcode],refname[ref_bin_code*BIN_SZ + i],XnY_size,(unsigned int)rs,X_size,Y_size,jac_ind,Dm,
contain_ind,Da,CI95_j_prim1,CI95_j_prim2,CI95_Dm_prim1,CI95_Dm_prim2,CI95_c_prim1,
CI95_c_prim2,CI95_Da_prim1,CI95_Da_prim2,pv_j_prim,pv_c_prim,qv_j_prim,qv_c_prim);
}
close(fd);
munmap(ctx_obj_ct, s.st_size);
}
/* Print distance lines for every (ref, qry) pair in koc mode: mmap the
 * whole sharedk_ct.dat matrix of koc_dist_t and append one line per pair
 * to <distout_dir>/distance.out, walking queries in the same page-aligned
 * batches (num_cof_batch entries each) used when the matrix was filled.
 * Statistics mirror fname_dist_print(); additionally abundence_pct is the
 * mean query occurrence count over the pair's shared contexts. */
void koc_dist_print_nobin ( const char *distout_dir,unsigned int ref_num, unsigned int qry_num, unsigned int*ref_ctx_ct_list,
unsigned int*qry_ctx_ct_list, int num_cof_batch, char (*refname)[PATHLEN], char (*qryfname)[PATHLEN])
{
sprintf(full_distfcode,"%s/sharedk_ct.dat",distout_dir);
int fd;
struct stat s;
fd = open (full_distfcode, O_RDONLY);
check (fd < 0, "open %s failed: %s", full_distfcode, strerror (errno));
fstat (fd, & s);
koc_dist_t * ctx_obj_ct = mmap(0, s.st_size , PROT_READ, MAP_PRIVATE, fd, 0);
check ( ctx_obj_ct == MAP_FAILED, "mmap %s failed: %s", full_distfcode, strerror (errno));
close(fd);
char distf[PATHLEN];
sprintf(distf, "%s/distance.out", distout_dir);
FILE *distfp = fopen(distf,"a") ;
if( distfp == NULL ) err(errno,"dist_print_nobin():%s",distf);
double jac_ind,contain_ind,Dm,Da,P_K_in_X_XnY, P_K_in_Y_XnY,
j_prim, c_prim, Dm_prim, Da_prim, sd_j_prim, sd_c_prim,
CI95_j_prim1, CI95_j_prim2, CI95_c_prim1, CI95_c_prim2,
CI95_Dm_prim1,CI95_Dm_prim2,CI95_Da_prim1,CI95_Da_prim2;
int Min_XY_size, X_size, Y_size, XnY_size, XuY_size, X_XnY_size, Y_XnY_size ;
int num_mapping_distf = qry_num / num_cof_batch; int bnum_infile;
double abundence_rowsum;
for(int b=0;b<=num_mapping_distf;b++){
if(b==num_mapping_distf){
bnum_infile = qry_num % num_cof_batch ;
if( bnum_infile == 0 ) continue;
}else bnum_infile = num_cof_batch;
size_t maplength = (size_t)bnum_infile * ref_num * sizeof(koc_dist_t);
for(int qid = 0; qid < bnum_infile; qid++) {
/* NOTE(review): abundence_rowsum is accumulated here but never read
 * afterwards — dead computation; confirm before removing */
abundence_rowsum = 0;
for(int rid = 0; rid < ref_num; rid++)
if(ctx_obj_ct[ ((llong)b*num_cof_batch + qid)*ref_num + rid ].shared_k_ct > 0)
abundence_rowsum += (double)ctx_obj_ct[ ((llong)b*num_cof_batch + qid) * ref_num + rid ].shared_koc_ct
/ (double)ctx_obj_ct[ ((llong)b*num_cof_batch + qid)*ref_num + rid ].shared_k_ct;
Y_size = qry_ctx_ct_list[b*num_cof_batch + qid];
for(int rid = 0; rid < ref_num; rid++) {
X_size = ref_ctx_ct_list[rid];
Min_XY_size = X_size < Y_size ? X_size : Y_size ;
XnY_size = ctx_obj_ct[ ((llong)b*num_cof_batch + qid)*ref_num + rid ].shared_k_ct;
/* mean occurrence count of the query k-mers shared with this ref;
 * NOTE(review): divides by XnY_size, which may be 0 — confirm */
double abundence_pct = (double)ctx_obj_ct[ ((llong)b*num_cof_batch + qid)*ref_num + rid ].shared_koc_ct
/ XnY_size;
XuY_size = X_size + Y_size - XnY_size ;
X_XnY_size = X_size - XnY_size;
Y_XnY_size = Y_size- XnY_size;
jac_ind = (double)XnY_size / XuY_size;
contain_ind = (double)XnY_size / Min_XY_size ;
/* Mash-style distances: Dm from Jaccard, Da from containment */
Dm = jac_ind == 1? 0: -log(2*jac_ind/(1+jac_ind)) / kmerlen ;
Da = contain_ind== 1? 0: -log(contain_ind) / kmerlen ;
/* rs = expected random shared count after dimension reduction */
P_K_in_X_XnY = 1 - pow( (1- 1/pow(alp_size,(kmerlen - dim_reduct_len) )), X_XnY_size );
P_K_in_Y_XnY = 1 - pow( (1- 1/pow(alp_size,(kmerlen - dim_reduct_len) )), Y_XnY_size );
double rs = P_K_in_X_XnY * P_K_in_Y_XnY * ( X_XnY_size + Y_XnY_size )
/(P_K_in_X_XnY + P_K_in_Y_XnY - 2*P_K_in_X_XnY * P_K_in_Y_XnY);
/* rs-corrected indices, their distances, CIs, p- and q-values */
j_prim = ((double)XnY_size - rs) / XuY_size ;
c_prim = ((double)XnY_size - rs) / Min_XY_size ;
Dm_prim = j_prim == 1? 0:-log(2*j_prim/(1+j_prim)) / kmerlen ;
Da_prim = c_prim==1? 0:-log(c_prim) / kmerlen ;
sd_j_prim = pow(j_prim*(1 - j_prim) / XuY_size, 0.5) ;
sd_c_prim = pow(c_prim*(1 - c_prim) / Min_XY_size,0.5) ;
CI95_j_prim1 = j_prim - 1.96*sd_j_prim;
CI95_j_prim2 = j_prim + 1.96*sd_j_prim;
CI95_c_prim1 = c_prim - 1.96*sd_c_prim;
CI95_c_prim2 = c_prim + 1.96*sd_c_prim;
CI95_Dm_prim1 = CI95_j_prim2 == 1? 0:-log(2*CI95_j_prim2/(1+CI95_j_prim2)) / kmerlen ;
CI95_Dm_prim2 = CI95_j_prim1 == 1? 0:-log(2*CI95_j_prim1/(1+CI95_j_prim1)) / kmerlen ;
CI95_Da_prim1 = CI95_c_prim2 == 1? 0:-log(CI95_c_prim2) / kmerlen ;
CI95_Da_prim2 = CI95_c_prim1 == 1? 0:-log(CI95_c_prim1) / kmerlen ;
double pv_j_prim = 0.5 * erfc( j_prim / sd_j_prim * pow(0.5,0.5) );
double pv_c_prim = 0.5 * erfc( c_prim/sd_c_prim * pow(0.5,0.5) );
double qv_j_prim = pv_j_prim * ref_seq_num*qry_seq_num ;
double qv_c_prim = pv_c_prim * ref_seq_num*qry_seq_num ;
fprintf(distfp,"%s\t%s\t%lf\t%u-%u|%u|%u\t%lf\t%lf\t%lf\t%lf\t%lf[%lf,%lf]\t%lf[%lf,%lf]\t%lf[%lf,%lf]\t%lf[%lf,%lf]\t%E\t%E\t%E\t%E\n",
qryfname[b*num_cof_batch + qid],refname[rid],abundence_pct,XnY_size,(unsigned int)rs,X_size,Y_size,jac_ind,Dm,
contain_ind,Da,j_prim,CI95_j_prim1,CI95_j_prim2,Dm_prim,CI95_Dm_prim1,CI95_Dm_prim2,c_prim,CI95_c_prim1,
CI95_c_prim2,Da_prim,CI95_Da_prim1,CI95_Da_prim2,pv_j_prim,pv_c_prim,qv_j_prim,qv_c_prim);
}
}
/* release this batch's pages; offsets stay page-aligned because
 * num_cof_batch was sized as a multiple of the page size */
munmap(ctx_obj_ct + (size_t)b*num_cof_batch*ref_num, maplength);
}
fclose(distfp);
}
/* Stream pairwise distance results for all query batches against all references,
 * reading shared-kmer counts from the memory-mapped count file and printing to
 * stdout (no binary output is written).  When opt_val->num_neigb (N_max) is
 * non-zero, only the N_max best-scoring references per query are printed.
 *
 * Parameters:
 *   distout_dir      directory holding sharedk_ct.dat and other outputs
 *   ref_num/qry_num  number of reference / query sketches
 *   ref_ctx_ct_list  per-reference k-mer context counts
 *   qry_ctx_ct_list  per-query k-mer context counts
 *   num_cof_batch    queries per mapped batch
 *   refname/qryfname sketch names (PATHLEN-sized rows)
 *   opt_val          user options (threads, metric, neighbor count, ...)
 */
void dist_print_nobin (const char *distout_dir,unsigned int ref_num, unsigned int qry_num, unsigned int*ref_ctx_ct_list,
unsigned int*qry_ctx_ct_list, int num_cof_batch, char (*refname)[PATHLEN], char (*qryfname)[PATHLEN],dist_opt_val_t *opt_val)
{
/* Resolve the shared-kmer count file path into the global buffer full_distfcode. */
if( opt_val->shared_kmerpath[0] != '\0') strcpy(full_distfcode, opt_val->shared_kmerpath );
else sprintf(full_distfcode,"%s/sharedk_ct.dat",distout_dir);
int fd;
struct stat s;
fd = open (full_distfcode, O_RDONLY);
check (fd < 0, "open %s failed: %s", full_distfcode, strerror (errno));
fstat (fd, & s);
/* Map the whole count file read-only; it is consumed batch by batch below. */
ctx_obj_ct_t * ctx_obj_ct = mmap(0, s.st_size , PROT_READ, MAP_PRIVATE, fd, 0);
check ( ctx_obj_ct == MAP_FAILED, "mmap %s failed: %s", full_distfcode, strerror (errno));
close(fd);
int p_fit_mem= opt_val->p;
prt_line_t * prt_buf = malloc(ref_num * sizeof(prt_line_t));
char distf[PATHLEN];
sprintf(distf, "%s/distance.out", distout_dir);
/* Output is currently forced to stdout; distf is kept for the file-output
 * variant.  The NULL check below is dead code while distfp == stdout. */
FILE *distfp = stdout; //fopen(distf,"w") ;
if( distfp == NULL ) err(errno,"dist_print_nobin():%s",distf);
int num_mapping_distf = qry_num / num_cof_batch; int bnum_infile;
print_ctrl_t outfield;
outfield.metric = opt_val->metric ;
outfield.pfield = opt_val->outfields;
outfield.correction = opt_val->correction;
outfield.dthreshold = opt_val->mut_dist_max;
outfield.cmprsn_num = ref_num*qry_num;
#define MAX_CNAME_SIZE 30
/* Column headers indexed by [metric][output-field-level]. */
char header[2][3][MAX_CNAME_SIZE] = {
{"Jaccard\tMashD","P-value(J)\tFDR(J)","Jaccard_CI\tMashD_CI" },
{"ContainmentM\tAafD","P-value(C)\tFDR(C)","ContainmentM_CI\tAafD_CI" }
};
fprintf(distfp,"Qry\tRef\tShared_k|Ref_s|Qry_s");
for( int i = 0 ; i<= (int)outfield.pfield ; i++)
fprintf(distfp,"\t%s", header[outfield.metric][i]);
fprintf(distfp,"\n");
#define NREF 1024
int N_max = opt_val->num_neigb;
/* NOTE(review): N_max is signed and ref_num unsigned, so the comparison
 * promotes N_max to unsigned; a negative N_max would pass — presumably the
 * option parser rejects negatives, verify upstream. */
if( (N_max > NREF) || (N_max > ref_num) ) { err(errno,"neighborN_max %d should smaller than NREF %d and ref_num %d",N_max,NREF,ref_num); } ;
typedef struct { double metric; int rid; } Nref_stuct;
Nref_stuct bestNref[NREF];
for(int b=0;b<=num_mapping_distf;b++){
if(b==num_mapping_distf){
bnum_infile = qry_num % num_cof_batch ;
if( bnum_infile == 0 ) continue;
}else bnum_infile = num_cof_batch;
size_t maplength = (size_t)bnum_infile * ref_num * sizeof(ctx_obj_ct_t);
for(int qid = 0; qid < bnum_infile; qid++) {
outfield.Y_size = qry_ctx_ct_list[b*num_cof_batch + qid];
outfield.qname = qryfname[b*num_cof_batch + qid];
outfield.qry_len = strlen(outfield.qname);
llong offset = ((llong)b*num_cof_batch + qid)*ref_num;
if( N_max ) {
/* Keep the N_max best references in descending metric order via
 * insertion into a small fixed window. */
for (int i=0; i< N_max ;i++ ) bestNref[i] = (Nref_stuct){0, -1};
for(int rid = 0; rid < ref_num; rid++) {
unsigned int X_size = ref_ctx_ct_list[rid];
unsigned int XnY_size = ctx_obj_ct[offset + rid];
double metric = outfield.metric == Ctm ?
(double) XnY_size / (X_size < outfield.Y_size ? X_size : outfield.Y_size) :
(double) XnY_size / (X_size + outfield.Y_size - XnY_size) ;
for(int i = N_max - 1 ; i>=0; i-- ){
if(metric > bestNref[i].metric){
/* FIX: guard the shift so we never write bestNref[N_max],
 * which is out of bounds when N_max == NREF; the entry
 * shifted past the window is discarded either way. */
if( i + 1 < N_max ) bestNref[i+1] = bestNref[i];
bestNref[i] = (Nref_stuct){ metric, rid };
}
else break;
}
}
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for( int i = 0 ; i< N_max;i++ ){
if (bestNref[i].rid != -1)
output_ctrl( ref_ctx_ct_list[ bestNref[i].rid ], ctx_obj_ct[offset + bestNref[i].rid], &outfield, refname[bestNref[i].rid], &prt_buf[i]);
else prt_buf[i].len = 0 ;
}
/* Serial write keeps output lines in rank order. */
for( int i = 0 ; i< N_max;i++ )
if(prt_buf[i].len >1) fwrite(prt_buf[i].line, prt_buf[i].len, 1, distfp);
}
else {
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int rid = 0; rid < ref_num; rid++)
output_ctrl( ref_ctx_ct_list[ rid ], ctx_obj_ct[ offset + rid ], &outfield, refname[rid], &prt_buf[rid]);
for(int rid = 0; rid < ref_num; rid++)
if(prt_buf[rid].len >1) fwrite(prt_buf[rid].line, prt_buf[rid].len, 1, distfp);
}
}
/* NOTE(review): munmap of interior slices is only valid when the byte offset
 * is page-aligned; for most batch sizes these calls likely fail with EINVAL
 * and the mapping is released at process exit — TODO confirm intent. */
munmap(ctx_obj_ct + (size_t)b*num_cof_batch*ref_num, maplength);
}
fclose(distfp); /* closes stdout while distfp == stdout; flushes buffered output */
free(prt_buf);
if(!opt_val->keep_shared_kmer) remove(full_distfcode);
}
#define GET_MATRIC(X,Y) ( (X) == Jcd ? 1 / (2*(Y)) + 0.5 : 1 / (Y) )
/* Format one query-vs-reference result line into linebuf.
 * Computes the chosen metric (Jaccard or Containment), the derived mutation
 * distance, and — depending on outfield->pfield — p-value/FDR and 95% CIs.
 * Sets linebuf->len = 0 (suppressing the line) when dist exceeds the user
 * threshold.  Called from OpenMP workers, so it must touch only its own
 * linebuf slot. */
static inline void output_ctrl (unsigned int X_size, unsigned int XnY_size, print_ctrl_t* outfield, char *rname, prt_line_t* linebuf ){
double rs = 0;
if(outfield->correction){
/* Expected number of spurious shared k-mers between the two non-shared
 * portions, subtracted from XnY_size below (random-match correction). */
unsigned int X_XnY_size = X_size - XnY_size;
unsigned int Y_XnY_size = outfield->Y_size - XnY_size;
double P_K_in_X_XnY = 1 - pow( (1- 1/pow(alp_size,(kmerlen - dim_reduct_len) )), X_XnY_size );
double P_K_in_Y_XnY = 1 - pow( (1- 1/pow(alp_size,(kmerlen - dim_reduct_len) )), Y_XnY_size );
rs = P_K_in_X_XnY * P_K_in_Y_XnY * ( X_XnY_size + Y_XnY_size )
/(P_K_in_X_XnY + P_K_in_Y_XnY - 2*P_K_in_X_XnY * P_K_in_Y_XnY);
}
/* Denominator: union size for Jaccard, min sketch size for Containment. */
unsigned int tmp = outfield->metric == Jcd ? X_size + outfield->Y_size - XnY_size
:(X_size < outfield->Y_size ? X_size : outfield->Y_size);
double metric = ((double)XnY_size - rs) / tmp;
double dist = log( GET_MATRIC(outfield->metric,metric) ) / kmerlen;
if (dist > 1) dist = 1 ;
if( dist > outfield->dthreshold ) { linebuf->len = 0 ; return; };
/* LST_PRT_LEN is the minimum byte count of the fixed fields after the query
 * name; strlen is started past qry_len + LST_PRT_LEN to skip the known
 * prefix cheaply.  NOTE(review): these hand-tuned offsets (27, 4, 20 below)
 * must match the snprintf formats exactly — verify if any format changes. */
#define LST_PRT_LEN 27
snprintf(linebuf->line,LINE_LEN,"%s\t%s\t%u-%u|%u|%u\t%.6lf\t%.6lf",outfield->qname,rname,XnY_size,(unsigned int)rs,X_size, outfield->Y_size, metric, dist);
linebuf->len = LST_PRT_LEN + outfield->qry_len + strlen(linebuf->line + LST_PRT_LEN + outfield->qry_len);
if(outfield->pfield > Dst) {
/* One-sided normal-approximation p-value; FDR via Bonferroni-style
 * multiplication by the total number of comparisons. */
double sd = pow(metric*(1 - metric) / tmp, 0.5) ;
double pv = 0.5 * erfc( metric / sd * pow(0.5,0.5) ) ;
snprintf(linebuf->line + linebuf->len, LINE_LEN - linebuf->len,"\t%E\t%E", pv, pv * outfield->cmprsn_num );
linebuf->len += 4 + strlen(linebuf->line + linebuf->len + 4);
if(outfield->pfield >Qv){
/* 95% CI on the metric; the distance CI bounds swap because the
 * metric-to-distance transform is decreasing. */
double CI95_mtrc_1 = metric - 1.96*sd;
double CI95_mtrc_2 = metric + 1.96*sd;
double CI95_dist_1 = log( GET_MATRIC(outfield->metric,CI95_mtrc_2) ) / kmerlen;
double CI95_dist_2 = log( GET_MATRIC(outfield->metric,CI95_mtrc_1) ) / kmerlen;
snprintf(linebuf->line + linebuf->len, LINE_LEN - linebuf->len,"\t[%.6lf,%.6lf]\t[%.6lf,%.6lf]", CI95_mtrc_1, CI95_mtrc_2, CI95_dist_1, CI95_dist_2);
linebuf->len += 20 + strlen(linebuf->line + linebuf->len + 20);
}
}
snprintf(linebuf->line + linebuf->len, LINE_LEN - linebuf->len,"\n");
linebuf->len += 1;
}
/* Build the input-file table from either a list file (-f) or the remaining
 * command-line arguments.  Format checking is enabled only when no pipe
 * command preprocesses the input.  Returns NULL when no input was given. */
infile_tab_t* dist_organize_infiles (dist_opt_val_t *opt_val)
{
const int fmt_ck = (opt_val->pipecmd[0] == '\0') ? 1 : 0;
if (opt_val->fpath[0] != '\0')
return organize_infile_list(opt_val->fpath, fmt_ck);
if (opt_val->num_remaining_args > 0)
return organize_infile_frm_arg(opt_val->num_remaining_args, opt_val->remaining_args, fmt_ck);
perror("please specify the input/query files");
return NULL;
}
extern const char *acpt_infile_fmt[ACPT_FMT_SZ] ;
/* Build the reference-file table from opt_val->refpath.  A directory, or a
 * single file in a recognized sequence format, is treated as one input
 * argument; any other regular file is assumed to be a list of input paths.
 * Exits via err() when the path cannot be stat'ed; returns NULL for
 * unsupported file types. */
infile_tab_t* dist_organize_refpath( dist_opt_val_t *opt_val){
struct stat path_stat;
if (stat(opt_val->refpath, &path_stat) < 0)
err(errno, "dist_organize_refpath():%s", opt_val->refpath);
if (S_ISDIR(path_stat.st_mode) || isOK_fmt_infile(opt_val->refpath, acpt_infile_fmt, ACPT_FMT_SZ)) {
char *single_arg[] = { opt_val->refpath };
return organize_infile_frm_arg(1, single_arg, 1);
}
if (S_ISREG(path_stat.st_mode))
return organize_infile_list(opt_val->refpath, 1);
return NULL;
}
/* Merge several query sketch directories (opt_val->remaining_args) into one
 * combined sketch under opt_val->outdir: concatenates per-component combco
 * data files, rebases and concatenates their index files, and merges the
 * cofile statistics (counts + file names) into a single co_dstat record.
 * Queries whose shuf_id differs from the first query, or which use the
 * unsupported abundance model, are skipped with a message.
 * Returns the output directory (caller-owned storage in opt_val).
 * NOTE(review): fread/fwrite return values are unchecked throughout, and the
 * realloc calls overwrite their pointer directly (leak on failure) — the
 * surrounding code appears to treat I/O failure as fatal-by-crash; verify. */
const char * combine_queries(dist_opt_val_t *opt_val)
{
int p_fit_mem = opt_val->p;
const char* co_dir = opt_val->outdir;
const char kmerct_list_fname[] = "tmp_kmerct_list";
const char fname_fname[] = "tmp_infiles_fname";
if(opt_val->abundance){err(errno,"combine_queries(): abundance model not supported yet");}
FILE *co_stat_fp;
const char *qryco_dstat_fpath = NULL;
/* The first query seeds the combined statistics (co_dstat_one). */
qryco_dstat_fpath = test_get_fullpath(opt_val->remaining_args[0], co_dstat);
if( ( co_stat_fp = fopen(qryco_dstat_fpath,"rb")) == NULL ){
err(errno,"combine_queries():%s", qryco_dstat_fpath);
}
co_dstat_t co_dstat_one, co_dstat_it;
fread(&co_dstat_one, sizeof(co_dstat_t), 1, co_stat_fp);
if (co_dstat_one.koc) { err(errno,"combine_queries(): abundance model not supported yet"); }
/* Temporary files accumulate the per-file k-mer counts and file names; they
 * are appended into the final co_dstat file at the end, then removed. */
char one_kmerct_list_name[PATHLEN];
sprintf(one_kmerct_list_name,"%s/%s",co_dir,kmerct_list_fname);
FILE *kmerct_list_fp;
if( (kmerct_list_fp = fopen(one_kmerct_list_name,"wb") ) == NULL ) {err(errno,"%s",one_kmerct_list_name);}
ctx_obj_ct_t * tmp_ct_list = malloc(sizeof(ctx_obj_ct_t) * co_dstat_one.infile_num);
fread(tmp_ct_list,sizeof(ctx_obj_ct_t),co_dstat_one.infile_num,co_stat_fp);
fwrite(tmp_ct_list,sizeof(ctx_obj_ct_t),co_dstat_one.infile_num,kmerct_list_fp);
char one_infilename_name[PATHLEN];
sprintf(one_infilename_name,"%s/%s",co_dir,fname_fname);
FILE *infilename_name_fp;
if( ( infilename_name_fp = fopen(one_infilename_name,"wb") ) == NULL ){err(errno,"%s", one_infilename_name);}
char (*tmpname)[PATHLEN] = malloc(PATHLEN * co_dstat_one.infile_num);
fread(tmpname,PATHLEN,co_dstat_one.infile_num,co_stat_fp);
fwrite(tmpname,PATHLEN,co_dstat_one.infile_num,infilename_name_fp);
fclose(co_stat_fp);
/* Open one combined data file and one combined index file per component;
 * index_offset[c] tracks the running end offset for rebasing later indexes. */
FILE** com_cofp = malloc( sizeof(FILE*) * co_dstat_one.comp_num);
FILE** indexfp = malloc( sizeof(FILE*) * co_dstat_one.comp_num);
size_t *index_offset = malloc(sizeof(size_t) * co_dstat_one.comp_num);
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int c = 0; c < co_dstat_one.comp_num; c++) {
struct stat file_stat;
char combined_cof[PATHLEN];
sprintf(combined_cof,"%s/combco.%d",co_dir,c);
if( (com_cofp[c] = fopen(combined_cof,"wb")) == NULL) err(errno,"%s",combined_cof);
sprintf(combined_cof,"%s/combco.%d",opt_val->remaining_args[0],c);
stat(combined_cof, &file_stat);
/* Whole-file copy: read the first query's component into memory, write out. */
unsigned int *tmp_combco = malloc(file_stat.st_size);
FILE *com_cofp_it;
if( (com_cofp_it = fopen(combined_cof,"rb")) == NULL) err(errno,"%s",combined_cof);
fread(tmp_combco, file_stat.st_size, 1, com_cofp_it);
fwrite(tmp_combco, file_stat.st_size, 1, com_cofp[c]);
fclose(com_cofp_it);
free(tmp_combco);
char indexfname[PATHLEN];
sprintf(indexfname,"%s/combco.index.%d",co_dir,c);
if( (indexfp[c] = fopen(indexfname,"wb")) == NULL) err(errno,"%s",indexfname);
sprintf(indexfname, "%s/combco.index.%d", opt_val->remaining_args[0], c);
stat(indexfname, &file_stat);
size_t *tmp_index_combco = malloc(file_stat.st_size);
FILE *com_indexfp_it;
if((com_indexfp_it = fopen(indexfname,"rb") ) == NULL) err(errno,"%s",indexfname);
fread(tmp_index_combco,file_stat.st_size, 1,com_indexfp_it);
fwrite(tmp_index_combco,file_stat.st_size, 1,indexfp[c]);
/* Seed the rebasing offset with the last index entry of the first query. */
index_offset[c] = tmp_index_combco[file_stat.st_size/sizeof(size_t) - 1] ;
fclose(com_indexfp_it);
free(tmp_index_combco);
}
/* Append every subsequent query, validating shuf_id and abundance mode. */
for (int i = 1; i< opt_val->num_remaining_args; i++){
qryco_dstat_fpath = test_get_fullpath(opt_val->remaining_args[i],co_dstat);
if(qryco_dstat_fpath == NULL){
printf("%dth query %s is not a valid query: no %s file\n", i, opt_val->remaining_args[i], co_dstat);
continue;
}
else if( ( co_stat_fp = fopen(qryco_dstat_fpath,"rb")) == NULL ){
printf("combine_queries(): %dth query can not open %s\n", i, qryco_dstat_fpath);
continue;
}
fread(&co_dstat_it, sizeof(co_dstat_t), 1, co_stat_fp);
if(co_dstat_one.shuf_id != co_dstat_it.shuf_id){
printf("combine_queries(): %dth shuf_id: %u not match 0th shuf_id: %u\n",i, co_dstat_it.shuf_id, co_dstat_one.shuf_id);
fclose(co_stat_fp);
continue;
}
else if (co_dstat_it.koc){
printf("combine_queries(): %dth query abundance model not supported yet \n", i);
fclose(co_stat_fp);
continue;
}
co_dstat_one.all_ctx_ct += co_dstat_it.all_ctx_ct;
co_dstat_one.infile_num += co_dstat_it.infile_num;
/* Buffers are resized per query; they only ever hold one query's worth. */
tmp_ct_list = realloc(tmp_ct_list, sizeof(ctx_obj_ct_t) * co_dstat_it.infile_num);
fread(tmp_ct_list,sizeof(ctx_obj_ct_t),co_dstat_it.infile_num,co_stat_fp);
fwrite(tmp_ct_list,sizeof(ctx_obj_ct_t),co_dstat_it.infile_num,kmerct_list_fp);
tmpname = realloc(tmpname, PATHLEN * co_dstat_it.infile_num ) ;
fread(tmpname,PATHLEN,co_dstat_it.infile_num,co_stat_fp);
fwrite(tmpname,PATHLEN,co_dstat_it.infile_num,infilename_name_fp);
fclose(co_stat_fp);
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int c = 0; c < co_dstat_one.comp_num; c++) {
char combined_cof_it[PATHLEN];
FILE *com_cofp_it;
struct stat file_stat;
sprintf(combined_cof_it,"%s/combco.%d",opt_val->remaining_args[i],c);
stat(combined_cof_it,&file_stat);
unsigned int *tmpco = malloc(file_stat.st_size);
if( (com_cofp_it = fopen(combined_cof_it,"rb")) == NULL) err(errno,"%s",combined_cof_it);
fread(tmpco,file_stat.st_size, 1, com_cofp_it);
fwrite(tmpco,file_stat.st_size, 1, com_cofp[c]);
fclose(com_cofp_it);
free(tmpco);
char indexfname_it[PATHLEN];
sprintf(indexfname_it,"%s/combco.index.%d",opt_val->remaining_args[i],c);
stat(indexfname_it, &file_stat);
size_t * tmpindex = malloc(file_stat.st_size);
FILE *indexfp_it;
if( (indexfp_it = fopen(indexfname_it,"rb")) == NULL) err(errno,"%s",indexfname_it);
fread(tmpindex,file_stat.st_size, 1,indexfp_it);
fclose(indexfp_it);
int tmp_infile_num = file_stat.st_size/sizeof(size_t);
/* Rebase this query's offsets onto the running total; entry 0 (always 0)
 * is skipped when writing.  NOTE(review): the inner `i` shadows the
 * outer query index — legal but easy to misread. */
for(int i=1; i< tmp_infile_num; i++ ) { tmpindex[i] += index_offset[c];}
fwrite( tmpindex + 1, sizeof(size_t), tmp_infile_num - 1, indexfp[c]);
index_offset[c] = tmpindex[tmp_infile_num-1];
}
}
fclose(kmerct_list_fp);
fclose(infilename_name_fp);
#pragma omp parallel for num_threads(p_fit_mem) schedule(guided)
for(int c = 0; c < co_dstat_one.comp_num; c++) {
fclose(com_cofp[c]);
fclose(indexfp[c]);
}
/* Write the final co_dstat file: header, then the accumulated count list and
 * file-name list copied back from the temporary files. */
FILE *one_co_stat_fp;
char one_stat_name[PATHLEN];
sprintf(one_stat_name,"%s/%s",co_dir,co_dstat);
if( (one_co_stat_fp = fopen(one_stat_name,"wb")) == NULL) { err(errno,"%s",one_stat_name) ;};
fwrite(&co_dstat_one,sizeof(co_dstat_t),1,one_co_stat_fp);
struct stat file_stat;
stat(one_kmerct_list_name, &file_stat);
tmp_ct_list = realloc(tmp_ct_list, file_stat.st_size);
if( (kmerct_list_fp = fopen(one_kmerct_list_name,"rb") ) == NULL ) {err(errno,"%s",one_kmerct_list_name);}
fread(tmp_ct_list,file_stat.st_size,1,kmerct_list_fp);
fwrite(tmp_ct_list,file_stat.st_size,1,one_co_stat_fp);
stat(one_infilename_name,&file_stat);
tmpname = realloc(tmpname,file_stat.st_size);
if( (infilename_name_fp = fopen(one_infilename_name,"rb") ) == NULL ){err(errno,"%s", one_infilename_name);}
fread(tmpname,file_stat.st_size,1,infilename_name_fp);
fwrite(tmpname,file_stat.st_size,1,one_co_stat_fp);
fclose(one_co_stat_fp);
remove(one_kmerct_list_name);
remove(one_infilename_name);
free(tmp_ct_list);
free(tmpname);
free(com_cofp);
free(indexfp);
return co_dir;
}
|
hermv_c_csc_n_hi_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* y := alpha * op(A) * x + beta * y for a Hermitian matrix stored as the
 * upper triangle of a CSC matrix (transposed access pattern).
 * For each column i, the strictly-upper entries contribute both the direct
 * product (conjugated, via alpha_madde_2c) and the mirrored lower-triangle
 * product; the diagonal entry, if present, is handled separately with
 * alpha_mul_3c.  Per-thread accumulators (y_local) avoid write conflicts,
 * and the inner loop is unrolled by 4.
 *
 * NOTE(review): the alpha_memalign results are not checked for NULL —
 * presumably the allocator aborts on failure; verify. */
static alphasparse_status_t
hermv_csc_n_hi_trans_unroll(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
const ALPHA_INT num_threads = alpha_get_thread_num();
/* y *= beta */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], y[i], beta);
}
// each thread has a y_local
ALPHA_Number **y_local = alpha_memalign(num_threads * sizeof(ALPHA_Number *), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < num_threads; i++)
{
y_local[i] = alpha_memalign(m * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
memset(y_local[i], '\0', sizeof(ALPHA_Number) * m);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT ais = A->cols_start[i];
ALPHA_INT aie = A->cols_end[i];
ALPHA_INT start = ais;
/* First position in column i whose row index is >= i (diagonal slot). */
ALPHA_INT end = alpha_lower_bound(&A->row_indx[ais], &A->row_indx[aie], i) - A->row_indx;
/* FIX: guard end < aie before dereferencing — lower_bound returns aie
 * when the diagonal is absent (or the column is empty), and reading
 * A->row_indx[aie] is out of bounds for the last column. */
if(end < aie && A->row_indx[end] == i){
ALPHA_Number tmp;
alpha_mul_3c(tmp, alpha, A->values[end]);
alpha_madde(y_local[tid][i], tmp, x[i]);
}
const ALPHA_INT* A_row = &A->row_indx[ais];
const ALPHA_Number* A_val = &A->values[ais];
ALPHA_INT ai = 0;
ALPHA_INT ail = end - start;   /* number of strictly-upper entries */
ALPHA_Number alpha_xi, tmp;
alpha_mul(alpha_xi, alpha, x[i]);
/* Unrolled-by-4 body: each entry (r, i) contributes conj(a)*alpha*x[i]
 * to y[r] and alpha*a*x[r] to y[i] (Hermitian mirror). */
for(; ai < ail-3; ai+=4)
{
ALPHA_Number av0 = A_val[ai];
ALPHA_Number av1 = A_val[ai + 1];
ALPHA_Number av2 = A_val[ai + 2];
ALPHA_Number av3 = A_val[ai + 3];
ALPHA_INT ar0 = A_row[ai];
ALPHA_INT ar1 = A_row[ai + 1];
ALPHA_INT ar2 = A_row[ai + 2];
ALPHA_INT ar3 = A_row[ai + 3];
alpha_madde_2c(y_local[tid][ar0], av0, alpha_xi);
alpha_madde_2c(y_local[tid][ar1], av1, alpha_xi);
alpha_madde_2c(y_local[tid][ar2], av2, alpha_xi);
alpha_madde_2c(y_local[tid][ar3], av3, alpha_xi);
alpha_mul(tmp, alpha, av0);
alpha_madde(y_local[tid][i], tmp, x[ar0]);
alpha_mul(tmp, alpha, av1);
alpha_madde(y_local[tid][i], tmp, x[ar1]);
alpha_mul(tmp, alpha, av2);
alpha_madde(y_local[tid][i], tmp, x[ar2]);
alpha_mul(tmp, alpha, av3);
alpha_madde(y_local[tid][i], tmp, x[ar3]);
}
/* Remainder loop for the last (ail % 4) entries. */
for(; ai < ail; ai++)
{
ALPHA_Number av = A_val[ai];
ALPHA_INT ar = A_row[ai];
alpha_madde_2c(y_local[tid][ar], av, alpha_xi);
alpha_mul(tmp, alpha, av);
alpha_madde(y_local[tid][i], tmp, x[ar]);
}
}
/* Reduce the per-thread accumulators into y. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for(ALPHA_INT col = 0; col < m; col++)
for(ALPHA_INT i = 0; i < num_threads; i++)
{
//y[col] += y_local[i][col];
alpha_add(y[col], y[col], y_local[i][col]);
}
for(ALPHA_INT i = 0; i < num_threads; i++)
{
alpha_free(y_local[i]);
}
alpha_free(y_local);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point (name generated via the ONAME macro by the build
 * system); simply forwards to the unrolled implementation above. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return hermv_csc_n_hi_trans_unroll(alpha, A, x, beta, y);
}
|
GB_emult_04.c | //------------------------------------------------------------------------------
// GB_emult_04: C<M>= A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity
// structure as M, and its pattern is a subset of M.
// ------------------------------------------
// C <M>= A .* B
// ------------------------------------------
// sparse sparse bitmap bitmap (method: 04)
// sparse sparse bitmap full (method: 04)
// sparse sparse full bitmap (method: 04)
// sparse sparse full full (method: 04)
// TODO: this function can also do eWiseAdd, just as easily.
// Just change the "&&" to "||" in the GB_emult_04_template.
// If A and B are both full, eadd and emult are identical.
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (M_ek_slicing, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_phbix_free (C) ; \
}
GrB_Info GB_emult_04 // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
GrB_Matrix C, // output matrix, static header
const GrB_Type ctype, // type of output matrix C
const bool C_is_csc, // format of output matrix C
const GrB_Matrix M, // sparse/hyper, not NULL
const bool Mask_struct, // if true, use only the structure of M
bool *mask_applied, // if true, the mask was applied
const GrB_Matrix A, // input A matrix (bitmap/full)
const GrB_Matrix B, // input B matrix (bitmap/full)
const GrB_BinaryOp op, // op to perform C = op (A,B)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL && C->static_header) ;
ASSERT_MATRIX_OK (M, "M for emult_04", GB0) ;
ASSERT_MATRIX_OK (A, "A for emult_04", GB0) ;
ASSERT_MATRIX_OK (B, "B for emult_04", GB0) ;
ASSERT_BINARYOP_OK (op, "op for emult_04", GB0) ;
ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
ASSERT (!GB_PENDING (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;
// C takes the sparsity structure of the mask M
int C_sparsity = GB_sparsity (M) ;
GBURBLE ("emult_04:(%s<%s>=%s.*%s) ",
GB_sparsity_char (C_sparsity),
GB_sparsity_char_matrix (M),
GB_sparsity_char_matrix (A),
GB_sparsity_char_matrix (B)) ;
//--------------------------------------------------------------------------
// declare workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
int64_t *restrict Wfirst = NULL ;
int64_t *restrict Wlast = NULL ;
int64_t *restrict Cp_kfirst = NULL ;
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
//--------------------------------------------------------------------------
// get M, A, and B
//--------------------------------------------------------------------------
const int64_t *restrict Mp = M->p ;
const int64_t *restrict Mh = M->h ;
const int64_t *restrict Mi = M->i ;
// Mx is NULL for a structural mask: every entry of M is then treated true
const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
const int64_t vlen = M->vlen ;
const int64_t vdim = M->vdim ;
const int64_t nvec = M->nvec ;
const int64_t mnz = GB_nnz (M) ;
const size_t msize = M->type->size ;
// bitmap arrays; NULL when the matrix is full (GBB then reports 1)
const int8_t *restrict Ab = A->b ;
const int8_t *restrict Bb = B->b ;
//--------------------------------------------------------------------------
// check if C is iso and compute its iso value if it is
//--------------------------------------------------------------------------
const size_t csize = ctype->size ;
GB_void cscalar [GB_VLA(csize)] ;
bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;
//--------------------------------------------------------------------------
// allocate C->p and C->h
//--------------------------------------------------------------------------
GB_OK (GB_new (&C, true, // sparse or hyper (same as M), static header
ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
C_sparsity, M->hyper_switch, nvec, Context)) ;
int64_t *restrict Cp = C->p ;
//--------------------------------------------------------------------------
// slice the mask matrix M
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int M_ntasks, M_nthreads ;
GB_SLICE_MATRIX (M, 8, chunk) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// one Wfirst/Wlast/Cp_kfirst entry per task, carved from a single block
GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
Wfirst = Work ;
Wlast = Work + M_ntasks ;
Cp_kfirst = Work + M_ntasks * 2 ;
//--------------------------------------------------------------------------
// count entries in C
//--------------------------------------------------------------------------
// This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).
// TODO: if M is structural and A and B are both full, then C has exactly
// the same pattern as M, the first phase can be skipped.
int tid ;
#pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < M_ntasks ; tid++)
{
int64_t kfirst = kfirst_Mslice [tid] ;
int64_t klast = klast_Mslice [tid] ;
// counts for the (possibly shared) first and last vectors of this task
// go into Wfirst/Wlast; interior vectors write directly into Cp
Wfirst [tid] = 0 ;
Wlast [tid] = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// count the entries in C(:,j)
int64_t j = GBH (Mh, k) ;
int64_t pstart = j * vlen ; // start of A(:,j) and B(:,j)
int64_t pM, pM_end ;
GB_get_pA (&pM, &pM_end, tid, k,
kfirst, klast, pstart_Mslice, Mp, vlen) ;
int64_t cjnz = 0 ;
for ( ; pM < pM_end ; pM++)
{
bool mij = GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t i = Mi [pM] ;
cjnz +=
(GBB (Ab, pstart + i)
&& // TODO: for GB_add, use || instead
GBB (Bb, pstart + i)) ;
}
}
if (k == kfirst)
{
Wfirst [tid] = cjnz ;
}
else if (k == klast)
{
Wlast [tid] = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
}
}
//--------------------------------------------------------------------------
// finalize Cp, cumulative sum of Cp and compute Cp_kfirst
//--------------------------------------------------------------------------
GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;
//--------------------------------------------------------------------------
// allocate C->i and C->x
//--------------------------------------------------------------------------
int64_t cnz = Cp [nvec] ;
// set C->iso = C_iso OK
GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;
//--------------------------------------------------------------------------
// copy pattern into C
//--------------------------------------------------------------------------
// TODO: could make these components of C shallow instead
if (GB_IS_HYPERSPARSE (M))
{
// copy M->h into C->h
GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
}
C->nvec = nvec ;
C->jumbled = M->jumbled ;
C->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// get the opcode
//--------------------------------------------------------------------------
GB_Opcode opcode = op->opcode ;
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
bool op_is_first = (opcode == GB_FIRST_binop_code) ;
bool op_is_second = (opcode == GB_SECOND_binop_code) ;
bool op_is_pair = (opcode == GB_PAIR_binop_code) ;
GB_Type_code ccode = ctype->code ;
//--------------------------------------------------------------------------
// check if the values of A and/or B are ignored
//--------------------------------------------------------------------------
// With C = ewisemult (A,B), only the intersection of A and B is used.
// If op is SECOND or PAIR, the values of A are never accessed.
// If op is FIRST or PAIR, the values of B are never accessed.
// If op is PAIR, the values of A and B are never accessed.
// Contrast with ewiseadd.
// A is passed as x, and B as y, in z = op(x,y)
bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
bool B_is_pattern = op_is_first || op_is_pair || op_is_positional ;
//--------------------------------------------------------------------------
// using a built-in binary operator (except for positional operators)
//--------------------------------------------------------------------------
#define GB_PHASE_2_OF_2
if (C_iso)
{
//----------------------------------------------------------------------
// C is iso
//----------------------------------------------------------------------
// Cx [0] = cscalar = op (A,B)
GB_BURBLE_MATRIX (C, "(iso emult) ") ;
memcpy (C->x, cscalar, csize) ;
// pattern of C = set intersection of pattern of A and B
#define GB_ISO_EMULT
#include "GB_emult_04_template.c"
}
else
{
//----------------------------------------------------------------------
// C is non-iso
//----------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_AemultB_04(mult,xname) GB (_AemultB_04_ ## mult ## xname)
#define GB_BINOP_WORKER(mult,xname) \
{ \
info = GB_AemultB_04(mult,xname) (C, M, Mask_struct, A, B, \
Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
GB_Type_code xcode, ycode, zcode ;
if (!op_is_positional &&
GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
{
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker
//----------------------------------------------------------------------
if (!done)
{
GB_BURBLE_MATRIX (C, "(generic emult_04: %s) ", op->name) ;
GB_ewise_generic (C, op, NULL, 0, 0,
NULL, NULL, NULL, C_sparsity, GB_EMULT_METHOD4, Cp_kfirst,
M_ek_slicing, M_ntasks, M_nthreads, NULL, 0, 0, NULL, 0, 0,
M, Mask_struct, false, A, B, Context) ;
}
}
//--------------------------------------------------------------------------
// remove empty vectors from C, if hypersparse
//--------------------------------------------------------------------------
GB_OK (GB_hypermatrix_prune (C, Context)) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
ASSERT_MATRIX_OK (C, "C output for emult_04", GB0) ;
(*mask_applied) = true ;
return (GrB_SUCCESS) ;
}
|
omp-not-thrdprvt.c | #include <stdio.h>
int x, y;
/* Test program demonstrating unsynchronized writes to the shared globals x
 * and y from OpenMP parallel regions (x and y are intentionally NOT declared
 * threadprivate, hence the file name): every thread races on x in the first
 * region and on y in the second, so the printed values are nondeterministic.
 * NOTE(review): omp_get_thread_num() is called without #include <omp.h>,
 * relying on an implicit declaration — presumably deliberate for this test
 * case, but it is an error under C99 and later; verify against the suite. */
int main() {
/* data race: all threads write x concurrently */
#pragma omp parallel
{
x = omp_get_thread_num();
}
/* data race: all threads read x and write y concurrently */
#pragma omp parallel
{
if(x % 2 == 0)
y = x + 1;
else
y = 0;
}
/* the two print regions show whatever values the races produced */
#pragma omp parallel
{
printf("%d, %d %d\n", x, y, omp_get_thread_num());
}
#pragma omp parallel
{
printf("%d, %d %d\n", x, y, omp_get_thread_num());
}
}
|
Mapping.h | //===--------- Mapping.h - OpenMP device runtime mapping helpers -- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#ifndef OMPTARGET_MAPPING_H
#define OMPTARGET_MAPPING_H
#include "Types.h"
namespace _OMP {
namespace mapping {
#pragma omp begin declare target device_type(nohost)
inline constexpr uint32_t MaxThreadsPerTeam = 1024;
#pragma omp end declare target
/// Initialize the mapping machinery.
void init(bool IsSPMD);
/// Return true if the kernel is executed in SPMD mode.
bool isSPMDMode();
/// Return true if the kernel is executed in generic mode.
bool isGenericMode();
/// Return true if the executing thread is the main thread in generic mode.
/// These functions look up runtime state, so they must only be called when
/// doing so is valid for the calling thread and location. See also
/// `isInitialThreadInLevel0` for a stateless
/// alternative for certain situations, e.g. during initialization.
bool isMainThreadInGenericMode();
bool isMainThreadInGenericMode(bool IsSPMD);
/// Return true if this thread is the initial thread in parallel level 0.
///
/// The thread for which this returns true should be used for single threaded
/// initialization tasks. We pick a special thread to ensure there are no
/// races between the initialization and the first read of initialized state.
bool isInitialThreadInLevel0(bool IsSPMD);
/// Return true if the executing thread has the lowest Id of the active threads
/// in the warp.
bool isLeaderInWarp();
/// Return a mask describing all active threads in the warp.
LaneMaskTy activemask();
/// Return a mask describing all threads with a smaller Id in the warp.
LaneMaskTy lanemaskLT();
/// Return a mask describing all threads with a larger Id in the warp.
LaneMaskTy lanemaskGT();
/// Return the thread Id in the warp, in [0, getWarpSize()).
uint32_t getThreadIdInWarp();
/// Return the thread Id in the block, in [0, getBlockSize()).
uint32_t getThreadIdInBlock();
/// Return the warp id in the block.
uint32_t getWarpId();
/// Return the warp size, thus number of threads in the warp.
uint32_t getWarpSize();
/// Return the number of warps in the block.
uint32_t getNumberOfWarpsInBlock();
/// Return the block Id in the kernel, in [0, getKernelSize()).
uint32_t getBlockId();
/// Return the block size, thus number of threads in the block.
///
/// Note: The version taking \p IsSPMD mode explicitly can be used during the
/// initialization of the target region, that is before `mapping::isSPMDMode()`
/// can be called by any thread other than the main one.
uint32_t getBlockSize();
uint32_t getBlockSize(bool IsSPMD);
/// Return the number of blocks in the kernel.
uint32_t getNumberOfBlocks();
/// Return the kernel size, thus number of threads in the kernel.
uint32_t getKernelSize();
/// Return the number of processing elements on the device.
uint32_t getNumberOfProcessorElements();
} // namespace mapping
} // namespace _OMP
#endif
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
/* presumably the sampling granularity for Bezier tracing — TODO confirm in TraceBezier() */
#define BezierQuantum 200
/* growth pad (elements) for the growable primitive_info array */
#define PrimitiveExtentPad 2048
/* hard cap on traced Bezier coordinates, bounding memory use */
#define MaxBezierCoordinates 4194304
/*
  Report a malformed point token in a drawing primitive.  Intended for use
  inside a loop/switch: raises DrawError, sets `status`, then breaks out.
*/
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
Typedef declarations.
*/
/*
  One monotonic polygon edge: a run of points that never reverses vertical
  direction, produced by ConvertPathToPolygon() for scanline rendering.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;          /* bounding box of the edge's points */

  double
    scanline;        /* set to -1.0 when the edge is emitted; updated during rendering */

  PointInfo
    *points;         /* edge vertices, stored with ascending y (reversed if traced upward) */

  size_t
    number_points;   /* entries in points[] */

  ssize_t
    direction;       /* 1 if the original path ran downward, 0 if upward (see LogPolygonInfo) */

  MagickBooleanType
    ghostline;       /* edge closes an open subpath; logged as "transparent" */

  size_t
    highwater;       /* rendering cursor hint; initialized to 0 here */
} EdgeInfo;
/*
  Geometry of an ellipse-like element.  NOTE(review): not referenced within
  this chunk; field semantics inferred from names — confirm at the use site.
*/
typedef struct _ElementInfo
{
  double
    cx,      /* center x (presumed) */
    cy,      /* center y (presumed) */
    major,   /* major-axis extent (presumed) */
    minor,   /* minor-axis extent (presumed) */
    angle;   /* rotation angle (presumed) */
} ElementInfo;
/*
  Shared state threaded through the MVG/path tracing helpers (TracePath,
  TraceBezier, ...): the growable primitive array, its capacity, and the
  current write offset.
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;  /* address of the growable primitive array */

  size_t
    *extent;           /* address of the array's allocated element count */

  ssize_t
    offset;            /* current write position within the array */

  PointInfo
    point;             /* presumably the current pen position — confirm in TracePath() */

  ExceptionInfo
    *exception;        /* sink for errors raised while tracing */
} MVGInfo;
/*
  A path converted to its efficient scanline-rendering form: an array of
  monotonic edges sorted by DrawCompareEdges() (see ConvertPathToPolygon()).
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;          /* sorted edge table */

  size_t
    number_edges;    /* entries in edges[] */
} PolygonInfo;
/*
  Vertex classification emitted by ConvertPrimitiveToPath().
*/
typedef enum
{
  MoveToCode,       /* first vertex of a closed subpath */
  OpenCode,         /* first vertex of a subpath left open (not closed) */
  GhostlineCode,    /* start of the synthetic segment capping an open subpath */
  LineToCode,       /* interior vertex */
  EndCode           /* array terminator */
} PathInfoCode;
/*
  One vertex of a vector path: its location plus a classification code.
*/
typedef struct _PathInfo
{
  PointInfo
    point;   /* vertex coordinates */

  PathInfoCode
    code;    /* vertex classification (see PathInfoCode) */
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo and initialize it to default values; the "critical"
    allocator is presumed to abort rather than return NULL — so no NULL
    check is required here.
  */
  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  CloneDrawInfo() deep-copies draw_info into a freshly acquired DrawInfo.
  Scalar and embedded-struct fields are copied by assignment; every owned
  pointer (strings, pattern images, dash pattern, gradient stops, masks) is
  duplicated so the clone can be destroyed independently of the original.
  A NULL draw_info yields a default-initialized structure.
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);  /* no source: return defaults */
  exception=AcquireExceptionInfo();
  /*
    Duplicate owned strings; CloneString() allocates fresh copies.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    Pattern images are cloned whole so each DrawInfo owns its own copy.
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is terminated by a near-zero entry: count the x
        live entries, then copy them plus the terminator into a zeroed
        buffer of 2*x+2 slots (extra slack beyond the copied x+1 values).
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /*
        The shallow gradient copy above shares the stops pointer; replace it
        with an owned copy.
      */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /*
          NOTE(review): this message tag says "dash pattern" but the failed
          allocation is the gradient stops — looks like a copy/paste of the
          block above; confirm the intended exception tag before changing.
        */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /* debug reflects the current logging state, not the source's flag */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  Compare two doubles for qsort ordering; any comparison involving NaN
  yields 0, matching the original macro's semantics.
*/
static inline int DrawCompareDoubles(const double p,const double q)
{
  if ((p-q) < 0.0)
    return(-1);
  if ((p-q) > 0.0)
    return(1);
  return(0);
}

/*
  qsort() comparator for EdgeInfo entries: orders edges by their first
  point (y, then x), then by slope via the cross product of the leading
  segments, then by the second point (y, then x).  Edge sorting for a
  right-handed coordinate system.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  const PointInfo
    *p,
    *q;

  int
    order;

  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  order=DrawCompareDoubles(p[0].y,q[0].y);
  if (order != 0)
    return(order);
  order=DrawCompareDoubles(p[0].x,q[0].x);
  if (order != 0)
    return(order);
  order=DrawCompareDoubles((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  if (order != 0)
    return(order);
  order=DrawCompareDoubles(p[1].y,q[1].y);
  if (order != 0)
    return(order);
  return(DrawCompareDoubles(p[1].x,q[1].x));
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
/*
  Reverse the point array in place by swapping elements from both ends
  toward the middle.  A zero- or one-element array is left untouched.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  register ssize_t
    head,
    tail;

  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
/*
  ConvertPathToPolygon() splits a vector path into y-monotonic edges and
  returns them sorted (via DrawCompareEdges) for scanline rendering.  A new
  edge is emitted whenever the path's vertical direction flips or a new
  subpath starts.  Returns NULL on allocation failure.

  NOTE(review): the allocation-failure paths leak memory — polygon_info
  (and the points of any edges already built) is not released before the
  NULL returns.  ResizeQuantumMemory() presumably frees its argument on
  failure (confirm in memory.c) which complicates a clean fix; flagging
  rather than restructuring here.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,       /* current vertical direction: -1 up, 0 unset, 1 down */
    next_direction;

  PointInfo
    point,           /* last point consumed */
    *points;         /* points accumulated for the edge under construction */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;          /* x-extent of the edge under construction */

  register ssize_t
    i,
    n;               /* points accumulated so far */

  MagickBooleanType
    ghostline;

  size_t
    edge,            /* index of the next edge slot to fill */
    number_edges,    /* allocated capacity of the edge table */
    number_points;   /* allocated capacity of points[] */

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);  /* NOTE(review): polygon_info leaks here */
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /*
    Seed edge slot 0 with empty values; overwritten when the first edge is
    emitted.
  */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the pending edge (if any) and start a new subpath.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* grow the edge table geometrically */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);  /* store ascending in y */
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;  /* ownership moved into the edge */
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: emit a new edge whenever the vertical direction flips.  Equal
      y values order by x so horizontal runs get a consistent direction.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: flush the current run; its last point also seeds the
          next run so the edges stay connected.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;  /* seed with the shared endpoint */
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* grow the point buffer geometrically */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /*
        Flush the final pending edge; a run of fewer than 2 points is
        discarded.
      */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /*
    Sort edges top-to-bottom (then left-to-right) for the scanline renderer.
  */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/*
  Emit a human-readable trace of a vector path to the draw event log, one
  line per vertex, until the EndCode terminator.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *description;

    switch (p->code)
    {
      case GhostlineCode:
        description="moveto ghostline";
        break;
      case OpenCode:
        description="moveto open";
        break;
      case MoveToCode:
        description="moveto";
        break;
      case LineToCode:
        description="lineto";
        break;
      default:
        description="?";
        break;
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g %s",p->point.x,
      p->point.y,description);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
/*
  ConvertPrimitiveToPath() flattens a PrimitiveInfo array into a PathInfo
  vector path.  Consecutive duplicate points are dropped; a subpath that is
  not closed is marked open and capped with a ghostline segment back to its
  starting point so the renderer can close it transparently.  Returns NULL
  for primitives that carry no vector path, or on allocation failure.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (for duplicate elimination) */

  register ssize_t
    i,
    n;  /* PathInfo entries emitted so far */

  ssize_t
    coordinates,  /* points remaining in the current subpath */
    start;        /* index of the current subpath's first PathInfo entry */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);  /* these primitives have no vector path */
    default:
      break;
  }
  /*
    Allocate a worst-case buffer: up to 3 entries per input point (point
    plus ghostline cap) and one terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points (within MagickEpsilon of the previous
          emitted point); subpath starts and ends are always kept.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed, and append a
      ghostline segment returning to the subpath's start.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim the buffer to the entries actually emitted (plus terminator).
    NOTE(review): if this resize fails NULL is returned; presumably
    ResizeQuantumMemory() releases the original buffer — confirm.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
/*
  DestroyDrawInfo() releases every resource owned by draw_info — strings,
  pattern images, dash pattern, gradient stops, and masks — invalidates the
  signature, then frees the structure itself.  Always returns NULL so the
  caller can assign the result back to its pointer.
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* poison the signature so stale pointers trip the assert above */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  Release the point array of the given edge and close the resulting gap in
  the edge table by shifting the trailing entries down one slot.  Returns
  the number of edges remaining.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  EdgeInfo
    *edges;

  size_t
    remaining;

  assert(edge < polygon_info->number_edges);
  edges=polygon_info->edges;
  edges[edge].points=(PointInfo *) RelinquishMagickMemory(edges[edge].points);
  polygon_info->number_edges--;
  remaining=polygon_info->number_edges;
  if (edge < remaining)
    (void) memmove(edges+edge,edges+edge+1,(size_t) (remaining-edge)*
      sizeof(*edges));
  return(remaining);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  Free every edge's point array, then the edge table, then the PolygonInfo
  structure itself.  Always returns NULL for pointer reassignment.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register EdgeInfo
    *edge;

  register ssize_t
    i;

  edge=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++, edge++)
    edge->points=(PointInfo *) RelinquishMagickMemory(edge->points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  For destination scanline y, shrink the horizontal span [x1,x2] of *edge to
  the part whose inverse-affine image falls inside the source: first clamp
  by the source's column boundaries, then by its row boundaries.  If the
  mapping is degenerate (near-zero coefficient) and the constant coordinate
  is out of range, an early return signals the caller via the x span.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge=(*edge);
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      if (intercept > inverse_edge.x1)
        inverse_edge.x1=intercept;
      intercept=(-z+(double) image->columns)/affine->sx;
      if (intercept < inverse_edge.x2)
        inverse_edge.x2=intercept;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        if (intercept > inverse_edge.x1)
          inverse_edge.x1=intercept;
        intercept=(-z/affine->sx);
        if (intercept < inverse_edge.x2)
          inverse_edge.x2=intercept;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* column coordinate is constant and out of range: empty span */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      if (intercept > inverse_edge.x1)
        inverse_edge.x1=intercept;
      intercept=(-z+(double) image->rows)/affine->rx;
      if (intercept < inverse_edge.x2)
        inverse_edge.x2=intercept;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        if (intercept > inverse_edge.x1)
          inverse_edge.x1=intercept;
        intercept=(-z/affine->rx);
        if (intercept < inverse_edge.x2)
          inverse_edge.x2=intercept;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): unlike the columns case above, this sets x2 to
            edge->x2 rather than producing an empty span — looks
            asymmetric; verify against upstream before changing.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  Invert a 2x3 affine transform: the 2x2 linear part is inverted via its
  adjugate scaled by the reciprocal determinant, and the translation is
  mapped back through the inverted linear part.  PerceptibleReciprocal()
  guards against a near-zero (degenerate) determinant.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal_determinant;

  reciprocal_determinant=PerceptibleReciprocal(affine->sx*affine->sy-
    affine->rx*affine->ry);
  inverse_affine.sx=reciprocal_determinant*affine->sy;
  inverse_affine.sy=reciprocal_determinant*affine->sx;
  inverse_affine.rx=reciprocal_determinant*(-affine->rx);
  inverse_affine.ry=reciprocal_determinant*(-affine->ry);
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
/*
  DrawAffineImage() composites source over image as dictated by the affine
  transform: the source's corners are forward-mapped to bound the affected
  destination region, then each destination scanline is inverse-mapped back
  into source coordinates, interpolated, and composited over.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],   /* forward-mapped source corners */
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;        /* destination bounding box, clipped to the image */

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  /* forward-map the source's four corners into destination space */
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* clip this scanline to the span that maps inside the source */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* empty span */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;  /* NOTE(review): incremented below but never read */
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* inverse-map the destination pixel into source coordinates */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    limit;

  /*
    Clamp the stroke width to a sane upper bound proportional to the larger
    image dimension so absurd widths cannot blow up later rendering math.
  */
  limit=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows);
  return(MagickMin((double) draw_info->stroke_width,limit));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Developer debugging aid: stroke one rectangle per polygon edge (color
    keyed off the edge direction flag) plus one blue rectangle around the
    union of all edge bounds.
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI unless the draw info carries an explicit density.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /*
    Half the scaled (and sanity-clamped) stroke width: rectangles below are
    expanded by this amount so they enclose the stroked outline, not just
    the bare path.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounding boxes, padded by mid and clamped to the
        image extent.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /*
          Stroke color encodes the edge's direction flag (red vs. green).
        */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /*
            An edge rectangle failed to draw; release resources and bail.
          */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Finally stroke the overall bounding rectangle in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;

  Image
    *mask;

  MagickBooleanType
    status;

  /*
    Look up the clip-path definition stored as an image artifact under the
    given id, render it into a clipping mask, and install that mask as the
    image's write mask.  Returns MagickFalse when the artifact is missing
    or the mask cannot be rendered.
  */
  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path: render the MVG clip path onto a transparent canvas the
    size of the image, then extract the alpha channel as a grayscale mask
    image.  Caller owns (and must destroy) the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  /*
    Start from a fully transparent background.
  */
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /*
    Render the clip path with opaque white fill and fully transparent,
    zero-width stroke; clip_path=MagickTrue flags clip-path semantics for
    the renderer.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Separate the alpha channel into a grayscale mask and invert it; on
    NegateImage() failure the mask is destroyed and NULL is returned.
  */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path: render the MVG mask path onto a transparent canvas the
    size of the image, then extract the alpha channel as a grayscale
    composite mask.  Caller owns (and must destroy) the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  /*
    Start from a fully transparent background.
  */
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    Render the mask path with opaque white fill and fully transparent,
    zero-width stroke.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Separate the alpha channel into a grayscale mask and invert it; on
    NegateImage() failure the mask is destroyed and NULL is returned.
  */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  /*
    Stroke the polyline in dashes: walk each segment, alternating between
    "on" runs (stroked) and "off" gaps per draw_info->dash_pattern, honoring
    draw_info->dash_offset.  `n' indexes the pattern, `length' is the
    remaining length of the current pattern entry (in scaled units).
  */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count vertices and allocate a scratch polygon large enough to hold the
    dash segments (two entries per vertex plus slack).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through the pattern until the offset is
    exhausted, leaving `length' as the remainder of the current entry.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);  /* length of this polyline segment */
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /*
          Current pattern entry exhausted; advance (wrap to the start when
          a zero entry terminates the pattern).
        */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            Odd pattern index: a gap ends here; record the start point of
            the next dash.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            Even pattern index: a dash ends here; terminate and stroke the
            accumulated sub-polygon.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /*
      Carry the unused remainder of the current entry into the next segment.
    */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash (nudged by epsilon so it is visible).
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Return the (unnormalized) gradient offset of pixel (x,y): for linear
    gradients, the length of the projection of (x,y) onto the gradient
    vector; for radial gradients, the (possibly elliptical) distance from
    the gradient center.
  */
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);  /* guards division by zero */
      scale=p.x*q.x+p.y*q.y;  /* dot product p·q */
      offset=gamma*scale*length;  /* = |q|·cos(angle) = projection length */
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /*
            Repeat spread uses the raw Euclidean distance from the center.
          */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /*
        Rotate by the gradient angle and divide by the radii so an
        elliptical gradient maps onto the unit circle.
      */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *stop_1,
    *stop_2;

  /*
    qsort() comparator ordering gradient stops by ascending offset.

    The near-equality test must run *before* the greater-than test: the
    original order returned 1 for offsets that differ by less than
    MagickEpsilon (a > b) while the mirrored call returned 0, an
    inconsistent comparator — and qsort() requires a consistent ordering
    (C standard 7.22.5; an inconsistent comparator yields unspecified
    sort results).
  */
  stop_1=(const StopInfo *) x;
  stop_2=(const StopInfo *) y;
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  if (stop_1->offset > stop_2->offset)
    return(1);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Sort the color stops by ascending offset so the lookups below can scan
    linearly and interpolate between adjacent stops.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);  /* gradient vector length */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Offset for the first column of this row; linear gradients are
      normalized by the gradient vector length to map into [0,1].
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first/last stop.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /*
                  Linearly blend the two stops that bracket this offset.
                */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: the gradient ping-pongs, mirroring every other
            period so the pattern is continuous.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat spread: the gradient tiles with period `length' (linear)
            or `radius' (radial); `antialias' marks the last pixel of a
            period so the seam can be blended between the end stops.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At a period seam, blend between the extreme stops.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /*
        Composite the gradient color over the existing pixel.
      */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives; grow the
    primitive-info buffer when there is not.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /*
    The requested size is evaluated in double precision so an overflowing
    request is detected instead of wrapping around in size_t arithmetic.
  */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /*
            Mark the newly acquired tail entries as undefined primitives.
          */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: lexicographic order of two NUL-terminated
    macro names.
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes: each named
    `push ... "name"' ... `pop' pair is extracted and stored in a splay
    tree keyed by name, with the enclosed MVG text as the value.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              Track push/pop nesting; the macro body ends at the pop that
              balances this push (n drops back to zero).
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  /*
    A token is a point when StringToDouble() consumes at least one
    character or yields a non-zero value.
  */
  value=StringToDouble(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single coordinate in the primitive stream as an open subpath.
  */
  primitive_info->point=point;
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
                GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    DrawImage() renders the MVG primitives held in draw_info onto image.
    It is a thin public wrapper around RenderMVGContent() invoked at
    recursion depth 0.
  */
  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: on return, the rendered pattern image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Look up the MVG path, geometry, and optional gradient type registered as
    image artifacts under "name", render the path into a newly allocated
    image, and return it in *pattern.  Any image previously held in *pattern
    is destroyed first.  Returns MagickFalse if the artifacts are missing or
    the pattern image cannot be allocated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* bug fix: avoid NULL dereference when the
                             pattern image cannot be acquired */
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release the per-thread polygon rasterization state allocated by
    AcquirePolygonThreadSet(), then the holding array itself.  Always
    returns NULL so callers can write polygon_info=DestroyPolygonThreadSet().
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);  /* hoisted:
    invariant across iterations */
  for (i=0; i < number_threads; i++)
    if (polygon_info[i] != (PolygonInfo *) NULL)
      polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Build one PolygonInfo (sorted edge list) per worker thread so that
    DrawPolygonPrimitive() can rasterize scanlines in parallel without
    sharing mutable edge state.  Returns NULL on allocation failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    return((PolygonInfo **) NULL);
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(primitive_info);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    polygon_info[i]=ConvertPathToPolygon(path_info);
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        /*
          Bug fix: the original leaked path_info on this error path; release
          it before tearing down the partially built thread set.
        */
        path_info=(PathInfo *) RelinquishMagickMemory(path_info);
        return(DestroyPolygonThreadSet(polygon_info));
      }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      Once an edge's bounding box starts below this scanline (allowing for
      the stroke half-width "mid"), no later edge can contribute either, so
      stop scanning.
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge lies entirely above the scanline; release its point list */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume from the highwater mark cached on a previous scanline */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* projection falls before the segment: squared distance to q */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* projection falls past the segment: squared distance to q+1 */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* squared perpendicular distance to the segment interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* ghostline edges never contribute stroke coverage */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* antialias the subpath boundary from the edge distance */
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* point is right of the whole edge: it crosses unconditionally */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product decides which side of the segment (x,y) falls on */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: filled when the crossing count is odd */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  /*
    Rasterize a polygon/line primitive: build per-thread edge lists, clip
    the primitive's bounding box to the image frame, then composite fill and
    stroke coverage scanline by scanline (optionally in parallel).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  DisableMSCWarning(4127)
  if (0)
    {
      /* disabled debug aid: outline the primitive's bounding rectangles */
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  /* flood/border paint methods imply a filled shape */
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* half the stroke width, scaled into device space by the affine */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad for the stroke width, then reject or clip to the image frame */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /* NOTE(review): coordinates == 1 is unreachable after the <= 1 guard
     above; only the edgeless case selects this branch — confirm intent */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive's point is painted */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* snap coverage to all-or-nothing when antialiasing is off */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainCoordinate(double x)
{
  /*
    Clamp a primitive coordinate so later casts to ssize_t cannot overflow;
    512 units of headroom are kept at either extreme.  NaN passes through
    unchanged (both comparisons are false).
  */
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    point,
    q;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  /*
    Write a human-readable trace of this primitive to the drawing debug log.
    Scalar primitives (alpha, color, image, point, text) log one line and
    return; path-like primitives log each vertex, marking consecutive
    duplicates and whether each subpath ends where it began.
  */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    /* bug fix: the point was redundantly fetched twice per iteration; the
       duplicate assignment has been removed */
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath: remember its first point in p */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath complete: "last" when the endpoint differs from the start,
       "open" when it returned to the starting point */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
/*
  DrawPrimitive() renders a single graphic primitive from primitive_info
  onto image: alpha and color fills (point/replace/floodfill/reset
  methods), inline or file images, single points, annotated text, or --
  in the default case -- any polygon-based shape, with optional dashing
  and stroking.  Returns MagickTrue when every step succeeded.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
/*
  A gray canvas cannot hold a non-gray fill or stroke; promote to sRGB.
*/
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status=SetImageColorspace(image,sRGBColorspace,exception);
/*
  SVG compliance: install the clip and composite masks while drawing;
  they are removed again at the bottom of this routine.
*/
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
/*
  Primitive origin, constrained to a sane range and rounded to the
  nearest pixel center.
*/
x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
/*
  Modify the alpha channel only; the method selects which pixels.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
/*
  Set the alpha of the single pixel at (x,y) to the fill alpha.
*/
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
/*
  Set the alpha of every pixel that fuzzily matches the color at
  (x,y).  Note x and y are reused as loop indices from here on.
*/
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
/*
  Flood-fill alpha outward from (x,y): FloodfillMethod fills the
  matching region, FillToBorderMethod fills up to the border color.
*/
ChannelType
channel_mask;
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
/*
  Restrict the paint to the alpha channel for the fill's duration.
*/
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
/*
  Set the alpha of every pixel in the image to the fill alpha.
*/
MagickBooleanType
sync;
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
/*
  Same four methods as AlphaPrimitive, but replace the whole pixel
  with the fill color rather than just alpha.
*/
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
/*
  Composite an image (inline "data:" URI or a file) at (x,y),
  resizing it first when explicit width/height were supplied.
*/
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
SetImageInfo(clone_info,0,exception);
if (*clone_info->filename != '\0')
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=0;
break;
}
/*
  Only the first image of a sequence is composited.
*/
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
(void) TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
(void) SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
/*
  Over composites go through the affine path; every other operator
  uses a straight composite at the gravity-adjusted offset.
*/
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
(void) DrawAffineImage(image,composite_image,&affine,exception);
else
(void) CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
/*
  Composite one fill-colored pixel over (x,y); out-of-canvas points
  are silently ignored.
*/
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
/*
  Render text by delegating to AnnotateImage() with a clone of
  draw_info positioned at the primitive's point.
*/
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
/*
  Polygon-based primitives: fill first (with stroking suppressed),
  then dash or stroke as requested by draw_info.
*/
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
/* NOTE(review): plain assignment discards the status accumulated
above -- confirm intentional before changing to status&=. */
status=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
/* treat a subpath whose endpoints coincide as closed */
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
/*
  Remove the SVG masks installed above.
*/
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  ssize_t
    element;

  /*
    Build a tiny four-point polygon around the endpoint so that the
    round-join logic of DrawPolygonPrimitive() renders a circular cap.
  */
  element=0;
  while (element < 4)
    linecap[element++]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;  /* sentinel terminator */
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
/*
  DrawStrokePolygon() renders the stroke of each subpath in
  primitive_info by tracing its stroke outline polygon and filling that
  outline with the stroke color or pattern; round linecaps are added to
  open subpaths when requested.  Returns MagickTrue when every subpath
  succeeded.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
/*
  The stroke is painted as a *fill* of the traced outline: the clone
  uses the stroke color/pattern as its fill and disables stroking.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
/* a single point has no stroke outline to trace */
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(image,draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
/* q is the subpath's last point; cap both ends of open subpaths */
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
/*
  GetAffineMatrix() resets affine_matrix to the identity transform:
  sx=sy=1 with all other coefficients zeroed.
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
/*
  GetDrawInfo() initializes draw_info to library defaults, then
  overrides individual attributes from image_info settings and image
  options (font, density, colors, gravity, spacing, weight, ...).
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
/* defaults: opaque black fill, fully transparent white stroke */
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
/*
  Inherit font, density, antialias, pointsize, border color and the X11
  server name from the image info.
*/
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
/*
  Per-attribute overrides from image options; StringToDouble parse
  errors are silently ignored (next_token is not inspected).
*/
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
/* -1 means the option is not a symbolic weight; parse it numerically */
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the binomial coefficient C(n,k), i.e. n choose k.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
/*
  Permutate() returns the binomial coefficient C(n,k) as a double,
  computed as the product (k+1)*...*n divided by (n-k) factorial.
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  ssize_t
    term;

  result=1.0;
  term=k;
  while (++term <= n)
    result*=term;
  term=0;
  while (++term <= (n-k))
    result/=term;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
/*
  TraceArc() renders an arc bounded by the ellipse whose center is the
  midpoint of start and end, swept through the supplied degree range.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    midpoint,
    span;

  midpoint.x=0.5*(end.x+start.x);
  midpoint.y=0.5*(end.y+start.y);
  span.x=fabs(midpoint.x-start.x);
  span.y=fabs(midpoint.y-start.y);
  return(TraceEllipse(mvg_info,midpoint,span,degrees));
}
/*
  TraceArcPath() renders an SVG-style elliptical arc ('A' path element)
  from start to end with the given radii (arc), x-axis rotation angle,
  and large-arc/sweep flags.  The endpoint parameterization is converted
  to center form, and each slice of at most a quarter turn is
  approximated with one cubic Bezier segment.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
/*
  Degenerate arcs: coincident endpoints trace a point; zero radii trace
  a straight line (per the SVG arc implementation notes).
*/
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
/*
  Work in the ellipse-aligned coordinate frame; scale the radii up when
  they are too small to span the endpoints (delta > 1).
*/
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
/*
  Choose the arc center; the sign of factor implements the large-arc
  and sweep flag combination.
*/
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
/*
  One cubic Bezier per quarter turn (or less) of the total sweep theta.
*/
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
/* gamma is the Bezier control-point offset for this slice */
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
/*
  Emit the four Bezier control points, mapped back to user space.
*/
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
/*
  TraceBezier() may reallocate the buffer; refresh p from mvg_info.
*/
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
/*
  Back-fill the primitive type over all generated points.
*/
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
/*
  TraceBezier() expands the number_coordinates control points at the
  current mvg_info offset into a polyline approximation of the Bezier
  curve they define, evaluated in Bernstein-polynomial form.  Returns
  MagickFalse on coordinate overflow or allocation failure.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
/*
  Derive the sampling density (quantum) from the largest pairwise
  coordinate span, guarding against values that exceed SSIZE_MAX.
*/
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
/*
  Ensure the primitive buffer can hold every sample plus the final
  endpoint; CheckPrimitiveExtent() may reallocate it.
*/
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
end=primitive_info[number_coordinates-1].point;
/*
  coefficients[i] is the binomial weight C(n-1,i) of the Bernstein
  basis; weight sweeps the curve parameter from 0 toward 1.
*/
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
/*
  Back-fill the primitive type over all generated points.
*/
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
/*
  TraceCircle() renders a circle centered at start, whose radius is the
  distance from start to end, as a full 360-degree ellipse.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy,
    distance;

  PointInfo
    radii,
    span;

  dx=end.x-start.x;
  dy=end.y-start.y;
  distance=hypot((double) dx,(double) dy);
  radii.x=(double) distance;
  radii.y=(double) distance;
  span.x=0.0;
  span.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,span));
}
/*
  TraceEllipse() appends a polyline approximation of the ellipse
  centered at center with the given radii, swept from arc.x to arc.y
  degrees, to the primitive buffer at the current mvg_info offset.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
/* a zero radius leaves nothing to draw */
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
/*
  Choose an angular step fine enough for the larger radius, capped at
  pi/8 radians per segment.
*/
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
  Sample the ellipse at each angular step, then finish with the exact
  end angle.
*/
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
/*
  Mark the subpath closed when first and last samples coincide.
*/
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
/*
  TraceLine() records a straight line segment from 'start' to 'end' into
  the primitive list.  A line whose endpoints coincide (within
  MagickEpsilon) is degraded to a single point primitive.  Returns
  MagickTrue on success, MagickFalse if a point cannot be traced.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    coincident;

  /* Always emit the first endpoint. */
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  coincident=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (coincident != MagickFalse)
    {
      /* Degenerate line: collapse it to a one-coordinate point. */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  /* Emit the second endpoint and mark this as an open two-point path. */
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() parses an SVG-style path string (e.g. "M 10,10 L 20,20 Z")
  and appends the traced coordinates to mvg_info's primitive list.
  Uppercase commands use absolute coordinates, lowercase relative ones.
  Returns the number of coordinates traced, or 0 on error (the exception
  in mvg_info / the 'exception' argument is set by the helpers).
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  /*
    'subpath_offset' marks where the current subpath began so its
    coordinate count can be patched in when the subpath is closed or a
    new one starts.  'q' always points at the next free primitive slot.
  */
  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    /* Remember the previous command: 'S'/'T' reflection depends on it. */
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx,ry rotation large-arc-flag sweep-flag x,y.
          Repeats while further numeric arguments follow (implicit
          command repetition per the SVG path grammar).
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          /*
            NOTE(review): this tests the just-parsed sweep token for ','
            rather than *p, unlike the other flag reads above — confirm
            against upstream whether a comma here should be skipped
            before the x coordinate instead.
          */
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /* Lowercase command => offsets are relative to current point. */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve: two control points plus an endpoint; the
          current point supplies points[0].
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          /* Seed the four control points, then flatten the curve. */
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* Horizontal line-to: only the x coordinate is given. */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to: absolute ('L') or relative ('l') x,y pairs.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: starts a new subpath.  First close out the running
          subpath's coordinate count if anything was traced into it.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* The first pair is the subpath origin that 'Z' returns to;
             subsequent pairs are implicit line-to commands. */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve: one control point plus an endpoint.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point about
          the current point; only the second control point and the
          endpoint are read from the path string.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* No preceding cubic command: per SVG, the reflected control
             point collapses to the current point. */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the
          reflection of the previous control point about the current
          point; only the endpoint is read from the path string.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* No preceding quadratic command: reflection degenerates to
             the current point. */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /* Vertical line-to: only the y coordinate is given. */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: trace back to the subpath origin, mark the subpath
          closed, patch its coordinate count, and begin a fresh subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* Unknown path command letter. */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /* Close out the final (possibly open) subpath. */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* Stamp every traced coordinate with the primitive type; multiple
     closed subpaths use fill-to-border so holes render correctly. */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
/*
  TraceRectangle() records an axis-aligned rectangle as a closed
  five-point subpath (the first corner is repeated to close it) and
  stamps every traced coordinate with the rectangle's primitive type.
  Returns MagickTrue on success, MagickFalse if a point cannot be
  traced.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corner[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    k;

  /* Walk the four corners counter to the diagonal, closing on start. */
  corner[0]=start;
  corner[1].x=start.x;
  corner[1].y=end.y;
  corner[2]=end;
  corner[3].x=end.x;
  corner[3].y=start.y;
  corner[4]=start;
  q=primitive_info;
  for (k=0; k < 5; k++)
  {
    if (TracePoint(q,corner[k]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* Propagate the primitive type backwards over the traced points. */
  for (k=0; k < (ssize_t) primitive_info->coordinates; k++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with rounded corners by
  stitching together four quarter-ellipse arcs (one per corner) plus a
  closing point.  'arc' gives the corner radii; radii larger than half
  a side are clamped.  The combined path is collapsed back into a
  single primitive starting at the saved offset.  Returns MagickTrue on
  success, MagickFalse on trace/extent failure.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  /* Remember where this primitive begins; the four TraceEllipse calls
     below advance mvg_info->offset and it is restored afterwards. */
  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /* Degenerate rectangle: emit an empty primitive. */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* Clamp corner radii so opposite corners cannot overlap. */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* Top-right corner: 270..360 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Bottom-right corner: 0..90 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Bottom-left corner: 90..180 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Top-left corner: 180..270 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  /* Close the path by returning to the very first traced point. */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* Rewind the offset: the whole round-rect is one primitive. */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() implements square line caps for an open stroked
  path: it pushes the first and last vertices outward by 'offset'
  (half the stroke width) along the direction of their adjacent
  segments.  Always returns MagickTrue.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /* Find the first vertex that is measurably distinct from vertex 0 to
     establish the direction of the opening segment. */
  dx=0.0;
  dy=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  /*
    NOTE(review): if every vertex coincides, dx == dy == 0 and
    'distance' is 0 here, making the divisions below produce
    inf/NaN coordinates — confirm callers never pass a fully
    degenerate path.
  */
  distance=hypot((double) dx,(double) dy);
  /* Extend the start vertex outward along the opening segment. */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* Repeat from the other end: find a vertex distinct from the last. */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  /* Extend the end vertex outward along the closing segment. */
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}
/*
  TraceStrokePolygon() converts a traced path into a filled polygon
  representing its stroke outline.  For each segment it computes two
  parallel offset lines (the 'p' side and the 'q' side, half the stroke
  width away) and joins consecutive segments per draw_info->linejoin
  (bevel, miter, or round).  Returns a newly allocated primitive array
  the caller must free, or NULL on memory failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/* Grow path_p/path_q by 'pad' slots if the next join might overflow;
   on allocation failure everything is released and NULL returned. */
#define CheckPathExtent(pad) \
if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
  { \
    if (~max_strokes < (pad)) \
      { \
        path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
      } \
    else \
      { \
        max_strokes+=(pad); \
        path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
          sizeof(*path_p)); \
        path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
          sizeof(*path_q)); \
      } \
    if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
      { \
        if (path_p != (PointInfo *) NULL) \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        if (path_q != (PointInfo *) NULL) \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        polygon_primitive=(PrimitiveInfo *) \
          RelinquishMagickMemory(polygon_primitive); \
        return((PrimitiveInfo *) NULL); \
      } \
  }

  /* Holds a per-segment pair of values: .p for the previous segment,
     .q for the current one (slopes, deltas, angles). */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  /* For closed paths with round/miter joins, duplicate the second
     vertex so the closing corner is joined like interior corners. */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* Near-vertical and near-horizontal segments use +/-1/MagickEpsilon
     as a finite stand-in for an infinite (inverse) slope. */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* 'mid' is half the effective stroke width in device space; the
     miter limit is compared against squared distances. */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* Perpendicular offset from the centerline to each side. */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /*
    NOTE(review): the p/q indices look swapped here versus the
    path_p[p++]/path_q[q++] convention used below; since both are zero
    at this point the net effect is identical, but confirm against
    upstream before "fixing".
  */
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* Skip vertices closer than half a pixel to the previous one. */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    /* Choose which side of the centerline is 'p' for this segment. */
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_[pq][4] is the intersection of the two offset lines: the
       miter point.  Parallel segments reuse the previous endpoint. */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* Sign of the cross product decides which side is convex; the
       join geometry is emitted on the convex side. */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          /* Miter point kept only while within the miter limit;
             otherwise fall back to a bevel. */
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* Approximate the rounded corner with short arc segments
             around the shared vertex. */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* Mirror image of the block above for the opposite turn
         direction: the roles of the p and q sides are exchanged. */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* Current segment becomes the previous one for the next join. */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: the p side forward, then the q side in
    reverse, closing back on the first point.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
omp-low.c | /* Lowering pass for OMP directives. Converts OMP directives into explicit
calls to the runtime library (libgomp), data marshalling to implement data
sharing and copying clauses, offloading to accelerators, and more.
Contributed by Diego Novillo <dnovillo@redhat.com>
Copyright (C) 2005-2017 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "omp-general.h"
#include "omp-low.h"
#include "omp-grid.h"
#include "gimple-low.h"
#include "symbol-summary.h"
#include "tree-nested.h"
#include "context.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "hsa-common.h"
/* Lowering of OMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
re-gimplifying things when variables have been replaced with complex
expressions.
Final code generation is done by pass_expand_omp. The flowgraph is
scanned for regions which are then moved to a new
function, to be invoked by the thread library, or offloaded. */
/* Context structure. Used to store information about each parallel
directive in the code. */
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;

  /* The OMP statement this context was created for.  */
  gimple *stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  /* Record type whose fields mirror the shared variables, plus the
     sender/receiver declarations of that type used on each side.  */
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* The sibling GIMPLE_OMP_FOR simd with _simt_ clause or NULL
     otherwise.  */
  gimple *simt_stmt;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
};
/* Map from OMP statement to its omp_context; entries are freed through
   delete_omp_context (see new_omp_context / delete_omp_context below).  */
static splay_tree all_contexts;
/* Current nesting depth of parallel/task regions being scanned.  */
static int taskreg_nesting_level;
/* Current nesting depth of target regions being scanned.  */
static int target_nesting_level;
/* DECL_UIDs of variables made addressable only because a task shares
   them; consulted in omp_copy_decl_2 so privatized copies can drop
   TREE_ADDRESSABLE again.  Lazily allocated in use_pointer_for_field.  */
static bitmap task_shared_vars;
/* Contexts of parallel/task regions collected during scanning.  */
static vec<omp_context *> taskreg_contexts;
static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
/* Case labels plus shared action used inside walk_gimple_stmt callback
   switches: these are pure container statements (binds, EH regions,
   transactions), so report them as not handled and let the walker
   descend into their sub-statements.  */
#define WALK_SUBSTMTS \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Return true if CTX corresponds to an oacc parallel region.  */

static bool
is_oacc_parallel (omp_context *ctx)
{
  if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
    return false;
  return gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_OACC_PARALLEL;
}
/* Return true if CTX corresponds to an oacc kernels region.  */

static bool
is_oacc_kernels (omp_context *ctx)
{
  if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
    return false;
  return gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS;
}
/* If DECL is the artificial dummy VAR_DECL created for non-static
   data member privatization, return the underlying "this" parameter,
   otherwise return NULL.  */

tree
omp_member_access_dummy_var (tree decl)
{
  if (!VAR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !DECL_IGNORED_P (decl)
      || !DECL_HAS_VALUE_EXPR_P (decl)
      || !lang_hooks.decls.omp_disregard_value_expr (decl, false))
    return NULL_TREE;

  tree v = DECL_VALUE_EXPR (decl);
  if (TREE_CODE (v) != COMPONENT_REF)
    return NULL_TREE;

  /* Peel component accesses, dereferences, conversions and pointer
     offsets off the value expression until the base object remains.  */
  bool stripping = true;
  while (stripping)
    switch (TREE_CODE (v))
      {
      case COMPONENT_REF:
      case MEM_REF:
      case INDIRECT_REF:
      CASE_CONVERT:
      case POINTER_PLUS_EXPR:
	v = TREE_OPERAND (v, 0);
	break;
      default:
	stripping = false;
	break;
      }

  /* Accept only an artificial pointer PARM_DECL belonging to the
     current function (the "this" parameter).  */
  if (TREE_CODE (v) == PARM_DECL
      && DECL_CONTEXT (v) == current_function_decl
      && DECL_ARTIFICIAL (v)
      && TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE)
    return v;
  return NULL_TREE;
}
/* Helper for unshare_and_remap, called through walk_tree.  DATA points
   at a two-element tree array: [0] is the node to replace, [1] the
   replacement.  */

static tree
unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data)
{
  tree *from_to = (tree *) data;

  if (*tp == from_to[0])
    {
      /* Substitute an unshared copy of the replacement and stop
	 descending into it.  */
      *tp = unshare_expr (from_to[1]);
      *walk_subtrees = 0;
      return NULL_TREE;
    }
  if (IS_TYPE_OR_DECL_P (*tp))
    *walk_subtrees = 0;
  return NULL_TREE;
}
/* Return unshare_expr (X) with all occurrences of FROM
   replaced with TO.  */

static tree
unshare_and_remap (tree x, tree from, tree to)
{
  tree pair[2] = { from, to };
  tree copy = unshare_expr (x);
  walk_tree (&copy, unshare_and_remap_1, pair, NULL);
  return copy;
}
/* Convenience function for calling scan_omp_1_op on tree operands.
   CTX is passed through as the walk info.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof wi);
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
/* Forward declarations: the lowering entry point and the outer-context
   decl lookups, all defined later in this file.  */
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  gimple *stmt = ctx->stmt;
  return gimple_code (stmt) == GIMPLE_OMP_PARALLEL;
}
/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  gimple *stmt = ctx->stmt;
  return gimple_code (stmt) == GIMPLE_OMP_TASK;
}
/* Return true if CTX is for an omp taskloop.  */

static inline bool
is_taskloop_ctx (omp_context *ctx)
{
  if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
    return false;
  return gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP;
}
/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  if (is_parallel_ctx (ctx))
    return true;
  return is_task_ctx (ctx);
}
/* Return true if EXPR is variable sized, i.e. its type's size in bytes
   is not a compile-time constant.  */

static inline bool
is_variable_sized (const_tree expr)
{
  tree size = TYPE_SIZE_UNIT (TREE_TYPE (expr));
  return !TREE_CONSTANT (size);
}
/* Lookup variables.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  /* VAR must already be in the map; dereferencing a missing slot
     would crash, which serves as the assertion.  */
  tree *slot = ctx->cb.decl_map->get (var);
  return *slot;
}
/* As lookup_decl, but return NULL_TREE if VAR has no mapping in CTX.  */
static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *slot = ctx->cb.decl_map->get (const_cast<tree> (var));
  if (slot == NULL)
    return NULL_TREE;
  return *slot;
}
/* Return the field recorded for VAR in CTX's field_map; VAR must have
   been entered.  */
static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n = splay_tree_lookup (ctx->field_map,
					 (splay_tree_key) var);
  return (tree) n->value;
}
/* Return the sender-side field for KEY, falling back to field_map when
   no separate sfield_map exists; KEY must have been entered.  */
static inline tree
lookup_sfield (splay_tree_key key, omp_context *ctx)
{
  splay_tree map = ctx->sfield_map ? ctx->sfield_map : ctx->field_map;
  splay_tree_node n = splay_tree_lookup (map, key);
  return (tree) n->value;
}
/* Overload taking the variable itself as the key.  */
static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_key key = (splay_tree_key) var;
  return lookup_sfield (key, ctx);
}
/* As lookup_field, but return NULL_TREE when KEY has no entry.  */
static inline tree
maybe_lookup_field (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n = splay_tree_lookup (ctx->field_map, key);
  if (n == NULL)
    return NULL_TREE;
  return (tree) n->value;
}
/* Overload taking the variable itself as the key.  */
static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_key key = (splay_tree_key) var;
  return maybe_lookup_field (key, ctx);
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  As a side effect,
   for task contexts this may mark the outer variable addressable and
   record its DECL_UID in task_shared_vars.  */
static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  /* Aggregates and atomic-qualified variables are always passed by
     reference.  */
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl))
      || TYPE_ATOMIC (TREE_TYPE (decl)))
    return true;
  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;
      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;
      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;
      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;
      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;
	  /* Find the nearest enclosing parallel/task that also maps
	     DECL.  */
	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;
	  if (up)
	    {
	      tree c;
	      /* DECL only matters here if that construct lists it in an
		 explicit SHARED clause.  */
	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;
	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}
      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }
  return false;
}
/* Construct a new automatic decl similar to VAR, with the given NAME
   and TYPE, owned by the current function and chained onto CTX's
   block-local variables.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;

  /* If VAR is listed in task_shared_vars, it is only addressable
     because a task needed to take its address; the privatized copy
     need not be addressable.  */
  if (TREE_ADDRESSABLE (var)
      && task_shared_vars
      && bitmap_bit_p (task_shared_vars, DECL_UID (var)))
    TREE_ADDRESSABLE (copy) = 0;

  /* Push the copy onto the context's variable chain.  */
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;
  return copy;
}
/* Construct an automatic copy of VAR reusing its own name and type.  */
static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ref = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);

  /* Propagate volatility and read-only-ness from the field.  */
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ref) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ref) |= 1;

  return ref;
}
/* Build tree nodes to access the field for VAR on the receiver side.
   If BY_REF, dereference the field value as well.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     use the remapped field instead.  */
  tree remapped = maybe_lookup_field (field, ctx);
  if (remapped != NULL)
    field = remapped;

  tree ref = build_simple_mem_ref (ctx->receiver_decl);
  TREE_THIS_NOTRAP (ref) = 1;
  ref = omp_build_component_ref (ref, field);
  if (by_ref)
    {
      ref = build_simple_mem_ref (ref);
      TREE_THIS_NOTRAP (ref) = 1;
    }
  return ref;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  CODE, if given, is the clause code that
   triggered the lookup and refines how private/lastprivate outer
   references are resolved.  */
static tree
build_outer_var_ref (tree var, omp_context *ctx,
		     enum omp_clause_code code = OMP_CLAUSE_ERROR)
{
  tree x;
  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    /* Globals are accessed directly, no mapping needed.  */
    x = var;
  else if (is_variable_sized (var))
    {
      /* Recurse on the pointer behind VAR's DECL_VALUE_EXPR and
	 dereference the result.  */
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx, code);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      /* Parallel/task: read VAR through the receiver record.  */
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	    && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	   || (code == OMP_CLAUSE_PRIVATE
	       && (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
		   || gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS
		   || gimple_code (ctx->stmt) == GIMPLE_OMP_SINGLE)))
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.
	 Similarly for OMP_CLAUSE_PRIVATE with outer ref, that can refer
	 to private vars in all worksharing constructs.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (code == OMP_CLAUSE_LASTPRIVATE && is_taskloop_ctx (ctx))
    {
      gcc_assert (ctx->outer);
      /* Taskloop lastprivate fields are keyed by &DECL_UID, not by the
	 decl itself (see install_var_field mask bit 8).  */
      splay_tree_node n
	= splay_tree_lookup (ctx->outer->field_map,
			     (splay_tree_key) &DECL_UID (var));
      if (n == NULL)
	{
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx->outer)))
	    x = var;
	  else
	    x = lookup_decl (var, ctx->outer);
	}
      else
	{
	  tree field = (tree) n->value;
	  /* If the receiver record type was remapped in the child function,
	     remap the field into the new record type.  */
	  x = maybe_lookup_field (field, ctx->outer);
	  if (x != NULL)
	    field = x;
	  x = build_simple_mem_ref (ctx->outer->receiver_decl);
	  x = omp_build_component_ref (x, field);
	  if (use_pointer_for_field (var, ctx->outer))
	    x = build_simple_mem_ref (x);
	}
    }
  else if (ctx->outer)
    {
      omp_context *outer = ctx->outer;
      if (gimple_code (outer->stmt) == GIMPLE_OMP_GRID_BODY)
	{
	  /* Grid bodies are transparent for this lookup; skip past.  */
	  outer = outer->outer;
	  gcc_assert (outer
		      && gimple_code (outer->stmt) != GIMPLE_OMP_GRID_BODY);
	}
      x = lookup_decl (var, outer);
    }
  else if (omp_is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else if (omp_member_access_dummy_var (var))
    x = var;
  else
    gcc_unreachable ();
  if (x == var)
    {
      /* For member-access dummies, expand the value expression with the
	 "this" parameter remapped to the outer context's version.  */
      tree t = omp_member_access_dummy_var (var);
      if (t)
	{
	  x = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
	  if (o != t)
	    x = unshare_and_remap (x, t, o);
	  else
	    x = unshare_expr (x);
	}
    }
  if (omp_is_reference (var))
    x = build_simple_mem_ref (x);
  return x;
}
/* Build tree nodes to access the field for KEY on the sender side.  */

static tree
build_sender_ref (splay_tree_key key, omp_context *ctx)
{
  return omp_build_component_ref (ctx->sender_decl, lookup_sfield (key, ctx));
}
/* Overload taking the variable itself as the key.  */
static tree
build_sender_ref (tree var, omp_context *ctx)
{
  splay_tree_key key = (splay_tree_key) var;
  return build_sender_ref (key, ctx);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  If
   BASE_POINTERS_RESTRICT, declare the field with restrict.
   MASK bits select behavior:
     1 - install into record_type / field_map;
     2 - install into srecord_type / sfield_map;
     4 - array passed via double indirection;
     8 - key the map by &DECL_UID (var) rather than VAR itself.  */
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx,
		   bool base_pointers_restrict = false)
{
  tree field, type, sfield = NULL_TREE;
  splay_tree_key key = (splay_tree_key) var;
  if ((mask & 8) != 0)
    {
      key = (splay_tree_key) &DECL_UID (var);
      gcc_checking_assert (key != (splay_tree_key) var);
    }
  /* Each key may be installed at most once per map.  */
  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, key));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, key));
  gcc_assert ((mask & 3) == 3
	      || !is_gimple_omp_oacc (ctx->stmt));
  type = TREE_TYPE (var);
  /* Prevent redeclaring the var in the split-off function with a restrict
     pointer type.  Note that we only clear type itself, restrict qualifiers in
     the pointed-to type will be ignored by points-to analysis.  */
  if (POINTER_TYPE_P (type)
      && TYPE_RESTRICT (type))
    type = build_qualified_type (type, TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    {
      type = build_pointer_type (type);
      if (base_pointers_restrict)
	type = build_qualified_type (type, TYPE_QUAL_RESTRICT);
    }
  else if ((mask & 3) == 1 && omp_is_reference (var))
    type = TREE_TYPE (type);
  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);
  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      SET_DECL_ALIGN (field, DECL_ALIGN (var));
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    SET_DECL_ALIGN (field, TYPE_ALIGN (type));
  if ((mask & 3) == 3)
    {
      /* Install into both records; the sender-side field is a fresh
	 twin of the receiver-side one.  */
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;
	  /* Lazily create srecord_type, mirroring every field already
	     present in record_type.  */
	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (t),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }
  if (mask & 1)
    splay_tree_insert (ctx->field_map, key, (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield);
}
/* Create a private copy of VAR in CTX and record the mapping; return
   the new decl.  */
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree local = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, local);
  return local;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  /* Carry over the value expression for variable-sized decls or when
     keeping a debug-only private copy.  */
  if (DECL_HAS_VALUE_EXPR_P (decl)
      && (private_debug || !TREE_CONSTANT (DECL_SIZE (new_decl))))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  /* Non-constant sizes must be remapped as well; fall back to the type's
     size when remapping fails.  */
  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      tree size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */
static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  /* CB is really the omp_context (see the comment on omp_context::cb).  */
  omp_context *ctx = (omp_context *) cb;
  tree new_var;
  if (TREE_CODE (var) == LABEL_DECL)
    {
      /* Non-local/forced labels must stay shared; everything else gets
	 a fresh artificial label in the current function.  */
      if (FORCED_LABEL (var) || DECL_NONLOCAL (var))
	return var;
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }
  /* Walk outward past non-taskreg contexts, checking each enclosing
     context for an existing mapping of VAR.  Note the first context
     checked is CTX's parent: the caller guarantees CTX itself has no
     mapping.  */
  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }
  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;
  /* A function-local variable with no mapping inside a parallel/task is
     an error; the caller recognizes error_mark_node.  */
  return error_mark_node;
}
/* Create a new context, with OUTER_CTX being the surrounding context.
   The new context is registered in all_contexts keyed by STMT.  */

static omp_context *
new_omp_context (gimple *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx == NULL)
    {
      /* Outermost context: initialize the copy_body_data from the
	 current function.  */
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }
  else
    {
      /* Nested context: inherit the copy_body_data from the parent.  */
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;
  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn: gimplify its body, wrap it for EH if needed,
   and register it with the callgraph.  */

static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  tree child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  struct function *child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  child_cfun->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  gbind *bind = gimplify_body (child_fn, false);
  gimple_seq seq = NULL;
  gimple_seq_add_stmt (&seq, bind);
  gimple_seq new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      /* EH wrapping changed the body; rebind it.  */
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node *node = cgraph_node::get_create (child_fn);
  node->parallelized_function = 1;
  cgraph_node::add_new_function (child_fn, false);
}
/* Destroy a omp_context data structures.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;
  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier (see install_var_field);
     clear it again so it does not produce corrupt debug information.  */
  if (ctx->record_type)
    for (tree f = TYPE_FIELDS (ctx->record_type); f; f = DECL_CHAIN (f))
      DECL_ABSTRACT_ORIGIN (f) = NULL;
  if (ctx->srecord_type)
    for (tree f = TYPE_FIELDS (ctx->srecord_type); f; f = DECL_CHAIN (f))
      DECL_ABSTRACT_ORIGIN (f) = NULL;

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */
static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;
  if (!ctx->receiver_decl)
    return;
  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      /* At least one field needs remapping: build a fresh record type
	 with every field copied and remapped.  */
      tree name, new_fields = NULL;
      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;
      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  /* New fields are prepended, so the list is reversed below.  */
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;
	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }
  /* In a target region we never modify any of the pointers in *.omp_data_i,
     so attempt to help the optimizers.  */
  if (is_gimple_omp_offloaded (ctx->stmt))
    type = build_qualified_type (type, TYPE_QUAL_CONST);
  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
specified by CLAUSES. If BASE_POINTERS_RESTRICT, install var field with
restrict. */
static void
scan_sharing_clauses (tree clauses, omp_context *ctx,
bool base_pointers_restrict = false)
{
tree c, decl;
bool scan_array_reductions = false;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
bool by_ref;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
goto do_private;
else if (!is_variable_sized (decl))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
{
/* Global variables don't need to be copied,
the receiver side will use them directly. */
tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
if (is_global_var (odecl))
break;
insert_decl_map (&ctx->cb, decl, odecl);
break;
}
gcc_assert (is_taskreg_ctx (ctx));
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
|| !is_variable_sized (decl));
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
use_pointer_for_field (decl, ctx);
break;
}
by_ref = use_pointer_for_field (decl, NULL);
if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
|| TREE_ADDRESSABLE (decl)
|| by_ref
|| omp_is_reference (decl))
{
by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 3, ctx);
install_var_local (decl, ctx);
break;
}
/* We don't need to copy const scalar vars back. */
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
goto do_private;
case OMP_CLAUSE_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& TREE_CODE (decl) == MEM_REF)
{
tree t = TREE_OPERAND (decl, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == INDIRECT_REF
|| TREE_CODE (t) == ADDR_EXPR)
t = TREE_OPERAND (t, 0);
install_var_local (t, ctx);
if (is_taskreg_ctx (ctx)
&& !is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
&& !is_variable_sized (t))
{
by_ref = use_pointer_for_field (t, ctx);
install_var_field (t, by_ref, 3, ctx);
}
break;
}
goto do_private;
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LINEAR:
decl = OMP_CLAUSE_DECL (c);
do_private:
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
install_var_field (decl, !omp_is_reference (decl), 3, ctx);
else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 3, ctx);
else
install_var_field (decl, false, 3, ctx);
}
if (is_variable_sized (decl))
{
if (is_task_ctx (ctx))
install_var_field (decl, false, 1, ctx);
break;
}
else if (is_taskreg_ctx (ctx))
{
bool global
= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
by_ref = use_pointer_for_field (decl, NULL);
if (is_task_ctx (ctx)
&& (global || by_ref || omp_is_reference (decl)))
{
install_var_field (decl, false, 1, ctx);
if (!global)
install_var_field (decl, by_ref, 2, ctx);
}
else if (!global)
install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 3, ctx);
else
install_var_field (decl, false, 3, ctx);
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
goto do_private;
case OMP_CLAUSE__LOOPTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
install_var_field (decl, false, 3, ctx);
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, 3, ctx);
break;
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE__CILK_FOR_COUNT_:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_MAP:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
/* Global variables with "omp declare target" attribute
don't need to be copied, the receiver side will use them
directly. However, global variables with "omp declare target link"
attribute need to be copied. */
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable
&& !lookup_attribute ("omp declare target link",
DECL_ATTRIBUTES (decl)))
break;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
{
/* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
not offloaded; there is nothing to map for those. */
if (!is_gimple_omp_offloaded (ctx->stmt)
&& !POINTER_TYPE_P (TREE_TYPE (decl))
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
{
if (TREE_CODE (decl) == COMPONENT_REF
|| (TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE)))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
}
if (DECL_P (decl))
{
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_field (decl2, true, 3, ctx);
install_var_local (decl2, ctx);
install_var_local (decl, ctx);
}
else
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 7, ctx);
else
install_var_field (decl, true, 3, ctx,
base_pointers_restrict);
if (is_gimple_omp_offloaded (ctx->stmt)
&& !OMP_CLAUSE_MAP_IN_REDUCTION (c))
install_var_local (decl, ctx);
}
}
else
{
tree base = get_base_address (decl);
tree nc = OMP_CLAUSE_CHAIN (c);
if (DECL_P (base)
&& nc != NULL_TREE
&& OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_DECL (nc) == base
&& OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
&& integer_zerop (OMP_CLAUSE_SIZE (nc)))
{
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
}
else
{
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
}
gcc_assert (!splay_tree_lookup (ctx->field_map,
(splay_tree_key) decl));
tree field
= build_decl (OMP_CLAUSE_LOCATION (c),
FIELD_DECL, NULL_TREE, ptr_type_node);
SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node));
insert_field_into_struct (ctx->record_type, field);
splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
(splay_tree_value) field);
}
}
break;
case OMP_CLAUSE__GRIDDIM_:
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE__GRIDDIM__SIZE (c), ctx->outer);
scan_omp_op (&OMP_CLAUSE__GRIDDIM__GROUP (c), ctx->outer);
}
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__SIMT_:
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (decl)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_local (decl, ctx);
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_array_reductions = true;
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
{
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
fixup_remapped_decl (decl2, ctx, false);
}
install_var_local (decl, ctx);
}
fixup_remapped_decl (decl, ctx,
OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& OMP_CLAUSE_PRIVATE_DEBUG (c));
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) != MEM_REF)
{
if (is_variable_sized (decl))
install_var_local (decl, ctx);
fixup_remapped_decl (decl, ctx, false);
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
break;
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
ctx->outer)))
break;
bool by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 11, ctx);
break;
}
fixup_remapped_decl (decl, ctx, false);
break;
case OMP_CLAUSE_MAP:
if (!is_gimple_omp_offloaded (ctx->stmt))
break;
decl = OMP_CLAUSE_DECL (c);
if (DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable)
break;
if (DECL_P (decl))
{
if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
&& !COMPLETE_TYPE_P (TREE_TYPE (decl)))
{
tree new_decl = lookup_decl (decl, ctx);
TREE_TYPE (new_decl)
= remap_type (TREE_TYPE (decl), &ctx->cb);
}
else if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
fixup_remapped_decl (decl2, ctx, false);
fixup_remapped_decl (decl, ctx, true);
}
else
fixup_remapped_decl (decl, ctx, false);
}
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE__CILK_FOR_COUNT_:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__GRIDDIM_:
case OMP_CLAUSE__SIMT_:
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
gcc_checking_assert (!scan_array_reductions
|| !is_gimple_omp_oacc (ctx->stmt));
if (scan_array_reductions)
{
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
}
/* Build an identifier naming an omp child function cloned from the
   current function.  The "_cilk_for_fn" suffix is used when IS_CILK_FOR;
   otherwise the suffix distinguishes a task copy function ("_omp_cpyfn")
   from an ordinary outlined region ("_omp_fn").  */

static tree
create_omp_child_function_name (bool task_copy, bool is_cilk_for)
{
  const char *suffix;
  if (is_cilk_for)
    suffix = "_cilk_for_fn";
  else if (task_copy)
    suffix = "_omp_cpyfn";
  else
    suffix = "_omp_fn";
  return clone_function_name (current_function_decl, suffix);
}
/* Returns the type of the induction variable for the child function for
   _Cilk_for and the types for _high and _low variables based on TYPE:
   a 32-bit integer type when TYPE fits in 32 bits, a 64-bit one
   otherwise, preserving TYPE's signedness.  */

static tree
cilk_for_check_loop_diff_type (tree type)
{
  bool fits_32bit
    = TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node);
  if (TYPE_UNSIGNED (type))
    return fits_32bit ? uint32_type_node : uint64_type_node;
  return fits_32bit ? integer_type_node : long_long_integer_type_node;
}
/* Return true if CTX may belong to offloaded code: either if current function
   is offloaded, or any enclosing context corresponds to a target region.  */

static bool
omp_maybe_offloaded_ctx (omp_context *ctx)
{
  if (cgraph_node::get (current_function_decl)->offloadable)
    return true;
  /* Walk outward through the enclosing contexts looking for a
     target region.  */
  while (ctx != NULL)
    {
      if (is_gimple_omp_offloaded (ctx->stmt))
	return true;
      ctx = ctx->outer;
    }
  return false;
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  For TASK_COPY the function takes two pointer
   arguments (.omp_data_o, .omp_data_i); otherwise a single .omp_data_i
   pointer, plus the __low/__high pair for a _Cilk_for parallel.  The new
   decl is recorded either as CTX's destination function (for later body
   outlining) or as the task's copy function.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;
  /* For a Cilk Plus combined parallel, the _CILK_FOR_COUNT_ clause
     drives both the child function name and the extra parameters.  */
  tree cilk_for_count
    = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
      ? omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
			 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
  tree cilk_var_type = NULL_TREE;

  name = create_omp_child_function_name (task_copy,
					 cilk_for_count != NULL_TREE);
  /* Pick the function type matching the parameter lists built below.  */
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				    ptr_type_node, NULL_TREE);
  else if (cilk_for_count)
    {
      type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
      cilk_var_type = cilk_for_check_loop_diff_type (type);
      type = build_function_type_list (void_type_node, ptr_type_node,
				      cilk_var_type, cilk_var_type, NULL_TREE);
    }
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);

  /* OpenACC never requests a task copy function.  */
  gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
		       || !task_copy);
  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  /* The child function is an artificial, non-public, non-inlinable
     definition with its own toplevel BLOCK.  */
  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl;
  /* Propagate offloadability from the context so the function is
     streamed out for the offload targets.  */
  if (omp_maybe_offloaded_ctx (ctx))
    {
      cgraph_node::get_create (decl)->offloadable = 1;
      if (ENABLE_OFFLOADING)
	g->have_offload = true;
    }

  if (cgraph_node::get_create (decl)->offloadable
      && !lookup_attribute ("omp declare target",
			    DECL_ATTRIBUTES (current_function_decl)))
    {
      /* Mark the actual target region entry point specially; nested
	 offloadable functions just get "omp declare target".  */
      const char *target_attr = (is_gimple_omp_offloaded (ctx->stmt)
				 ? "omp target entrypoint"
				 : "omp declare target");
      DECL_ATTRIBUTES (decl)
	= tree_cons (get_identifier (target_attr),
		     NULL_TREE, DECL_ATTRIBUTES (decl));
    }

  /* void return value.  */
  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  /* _Cilk_for's child function requires two extra parameters called
     __low and __high that are set the by Cilk runtime when it calls this
     function.  */
  if (cilk_for_count)
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier ("__high"), cilk_var_type);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier ("__low"), cilk_var_type);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* The .omp_data_i parameter: pointer to the record holding the
     shared/mapped data; prepended so it ends up first in the list.  */
  tree data_name = get_identifier (".omp_data_i");
  t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  TREE_READONLY (t) = 1;
  if (cilk_for_count)
    DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      /* Task copy functions additionally receive the destination
	 record (.omp_data_o) as their first argument.  */
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  init_tree_ssa (cfun);
  pop_cfun ();
}
/* Callback for walk_gimple_seq.  Check if combined parallel
   contains gimple_omp_for_combined_into_p OMP_FOR whose kind matches
   the gf_mask pointed to by WI->info.  On a match, WI->info is
   overwritten with the GIMPLE_OMP_FOR statement itself and a non-NULL
   value is returned to stop the walk.  */

tree
omp_find_combined_for (gimple_stmt_iterator *gsi_p,
		       bool *handled_ops_p,
		       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    /* Recurse into nested OMP constructs (macro expands to the
       relevant case labels).  */
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_combined_into_p (stmt)
	  && gimple_omp_for_kind (stmt)
	     == *(const enum gf_mask *) (wi->info))
	{
	  /* Hand the matching statement back through WI->info; the
	     non-NULL return terminates the walk.  */
	  wi->info = stmt;
	  return integer_zero_node;
	}
      break;
    default:
      break;
    }
  return NULL;
}
/* Add _LOOPTEMP_ clauses on OpenMP parallel or task STMT if its body
   contains a combined worksharing loop of kind MSK.  The temporaries
   carry istart/iend and collapsed-loop counts between the constructs.  */

static void
add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
			      omp_context *outer_ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.val_only = true;
  wi.info = (void *) &msk;
  /* omp_find_combined_for replaces WI.info with the matching
     GIMPLE_OMP_FOR if one is found.  */
  walk_gimple_seq (gimple_omp_body (stmt), omp_find_combined_for, NULL, &wi);
  if (wi.info != (void *) &msk)
    {
      gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info);
      struct omp_for_data fd;
      omp_extract_for_data (for_stmt, &fd, NULL);
      /* We need two temporaries with fd.loop.v type (istart/iend)
	 and then (fd.collapse - 1) temporaries with the same
	 type for count2 ... countN-1 vars if not constant.  */
      size_t count = 2, i;
      tree type = fd.iter_type;
      if (fd.collapse > 1
	  && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	{
	  count += fd.collapse - 1;
	  /* If there are lastprivate clauses on the inner
	     GIMPLE_OMP_FOR, add one more temporary for the total number
	     of iterations (product of count1 ... countN-1).  */
	  if (omp_find_clause (gimple_omp_for_clauses (for_stmt),
			       OMP_CLAUSE_LASTPRIVATE))
	    count++;
	  else if (msk == GF_OMP_FOR_KIND_FOR
		   && omp_find_clause (gimple_omp_parallel_clauses (stmt),
				       OMP_CLAUSE_LASTPRIVATE))
	    count++;
	}
      /* Prepend one _LOOPTEMP_ clause per temporary; the identity
	 decl-map entry keeps the var from being remapped later.  */
      for (i = 0; i < count; i++)
	{
	  tree temp = create_tmp_var (type);
	  tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
	  insert_decl_map (&outer_ctx->cb, temp, temp);
	  OMP_CLAUSE_DECL (c) = temp;
	  OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
	  gimple_omp_taskreg_set_clauses (stmt, c);
	}
    }
}
/* Scan an OpenMP parallel directive: create its omp_context, the
   .omp_data_s record type and the child function decl, then scan the
   clauses and body.  Empty bodies without copyin are removed outright.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && omp_find_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  /* Combined parallel workshare needs _LOOPTEMP_ clauses for
     communicating loop bounds to the inner GIMPLE_OMP_FOR.  */
  if (gimple_omp_parallel_combined_p (stmt))
    add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);

  ctx = new_omp_context (stmt, outer_ctx);
  /* Record type layout is deferred to finish_taskreg_scan, hence the
     context is pushed on taskreg_contexts.  */
  taskreg_contexts.safe_push (ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;
  /* Phony (HSA grid) parallels keep their body inline and need no
     child function.  */
  if (!gimple_omp_parallel_grid_phony (stmt))
    {
      create_omp_child_function (ctx, false);
      gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
    }

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  /* Nothing was mapped into the record: no receiver needed.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
}
/* Scan an OpenMP task directive: create its omp_context, the
   .omp_data_s (and, if needed, .omp_data_a sender) record types plus
   child and copy functions, then scan clauses and body.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));

  /* Ignore task directives with empty bodies, unless they have depend
     clause.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && !omp_find_clause (gimple_omp_task_clauses (stmt), OMP_CLAUSE_DEPEND))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  /* Taskloops need _LOOPTEMP_ clauses for their inner GIMPLE_OMP_FOR.  */
  if (gimple_omp_task_taskloop_p (stmt))
    add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);

  ctx = new_omp_context (stmt, outer_ctx);
  /* Layout of the record types is deferred to finish_taskreg_scan.  */
  taskreg_contexts.safe_push (ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  /* scan_sharing_clauses creates srecord_type when a separate sender
     record is needed; it requires a task copy function as well.  */
  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  /* No fields were mapped: drop the record and tell the runtime that
     no argument block is needed (size 0, align 1).  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
/* Helper function for finish_taskreg_scan, called through walk_tree.
   If maybe_lookup_decl_in_outer_ctx returns non-NULL for some
   tree, replace it in the expression.  */

static tree
finish_taskreg_remap (tree *tp, int *walk_subtrees, void *data)
{
  tree node = *tp;

  if (!VAR_P (node))
    {
      /* Other types and decls need no remapping and should not be
	 walked into.  */
      if (IS_TYPE_OR_DECL_P (node))
	*walk_subtrees = 0;
      return NULL_TREE;
    }

  *walk_subtrees = 0;
  omp_context *ctx = (omp_context *) data;
  tree repl = maybe_lookup_decl_in_outer_ctx (node, ctx);
  if (repl == node)
    return NULL_TREE;

  /* Substitute the outer-context decl, going through its DECL_VALUE_EXPR
     when it has one.  */
  if (DECL_HAS_VALUE_EXPR_P (repl))
    repl = unshare_expr (DECL_VALUE_EXPR (repl));
  *tp = repl;
  return NULL_TREE;
}
/* If any decls have been made addressable during scan_omp,
   adjust their fields if needed, and layout record types
   of parallel/task constructs.  */

static void
finish_taskreg_scan (omp_context *ctx)
{
  if (ctx->record_type == NULL_TREE)
    return;

  /* If any task_shared_vars were needed, verify all
     OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
     statements if use_pointer_for_field hasn't changed
     because of that.  If it did, update field types now.  */
  if (task_shared_vars)
    {
      tree c;

      for (c = gimple_omp_taskreg_clauses (ctx->stmt);
	   c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
	    && !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	  {
	    tree decl = OMP_CLAUSE_DECL (c);

	    /* Global variables don't need to be copied,
	       the receiver side will use them directly.  */
	    if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	      continue;
	    if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
		|| !use_pointer_for_field (decl, ctx))
	      continue;
	    tree field = lookup_field (decl, ctx);
	    /* Already a pointer to the decl's type: nothing to do.  */
	    if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
		&& TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
	      continue;
	    /* Change the by-value field into a by-reference one and
	       recompute its alignment (and the record's).  */
	    TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
	    TREE_THIS_VOLATILE (field) = 0;
	    DECL_USER_ALIGN (field) = 0;
	    SET_DECL_ALIGN (field, TYPE_ALIGN (TREE_TYPE (field)));
	    if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
	      SET_TYPE_ALIGN (ctx->record_type, DECL_ALIGN (field));
	    if (ctx->srecord_type)
	      {
		/* Keep the sender-side record field in sync.  */
		tree sfield = lookup_sfield (decl, ctx);
		TREE_TYPE (sfield) = TREE_TYPE (field);
		TREE_THIS_VOLATILE (sfield) = 0;
		DECL_USER_ALIGN (sfield) = 0;
		SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
		if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
		  SET_TYPE_ALIGN (ctx->srecord_type, DECL_ALIGN (sfield));
	      }
	  }
    }

  if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
  else
    {
      /* GIMPLE_OMP_TASK: the record layout has runtime-facing
	 constraints.  */
      location_t loc = gimple_location (ctx->stmt);
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    /* Unlink the variable-sized field and append it to the
	       VLA_FIELDS chain.  */
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      if (gimple_omp_task_taskloop_p (ctx->stmt))
	{
	  /* Move fields corresponding to first and second _looptemp_
	     clause first.  There are filled by GOMP_taskloop
	     and thus need to be in specific positions.  */
	  tree c1 = gimple_omp_task_clauses (ctx->stmt);
	  c1 = omp_find_clause (c1, OMP_CLAUSE__LOOPTEMP_);
	  tree c2 = omp_find_clause (OMP_CLAUSE_CHAIN (c1),
				     OMP_CLAUSE__LOOPTEMP_);
	  tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
	  tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
	  /* Unlink F1 and F2 from wherever they currently sit, then
	     re-chain them at the front of the record.  */
	  p = &TYPE_FIELDS (ctx->record_type);
	  while (*p)
	    if (*p == f1 || *p == f2)
	      *p = DECL_CHAIN (*p);
	    else
	      p = &DECL_CHAIN (*p);
	  DECL_CHAIN (f1) = f2;
	  DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
	  TYPE_FIELDS (ctx->record_type) = f1;
	  if (ctx->srecord_type)
	    {
	      /* Same reordering on the sender-side record.  */
	      f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
	      f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
	      p = &TYPE_FIELDS (ctx->srecord_type);
	      while (*p)
		if (*p == f1 || *p == f2)
		  *p = DECL_CHAIN (*p);
		else
		  p = &DECL_CHAIN (*p);
	      DECL_CHAIN (f1) = f2;
	      DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
	      TYPE_FIELDS (ctx->srecord_type) = f1;
	    }
	}
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      /* Record the argument block size/alignment on the task stmt.  A
	 non-constant size (VLA fields) must have its vars remapped to
	 the outer context so it is evaluable at the task launch site.  */
      tree t = fold_convert_loc (loc, long_integer_type_node,
				 TYPE_SIZE_UNIT (ctx->record_type));
      if (TREE_CODE (t) != INTEGER_CST)
	{
	  t = unshare_expr (t);
	  walk_tree (&t, finish_taskreg_remap, ctx, NULL);
	}
      gimple_omp_task_set_arg_size (ctx->stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (ctx->stmt, t);
    }
}
/* Find the closest enclosing offload context, or NULL if there is none.  */

static omp_context *
enclosing_target_ctx (omp_context *ctx)
{
  while (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
    ctx = ctx->outer;
  return ctx;
}
/* Return true if CTX is part of an oacc kernels region, i.e. some
   enclosing context is a GIMPLE_OMP_TARGET of OACC_KERNELS kind.  */

static bool
ctx_in_oacc_kernels_region (omp_context *ctx)
{
  while (ctx != NULL)
    {
      gimple *stmt = ctx->stmt;
      if (gimple_code (stmt) == GIMPLE_OMP_TARGET
	  && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
	return true;
      ctx = ctx->outer;
    }
  return false;
}
/* Check the parallelism clauses inside a kernels regions.
   Until kernels handling moves to use the same loop indirection
   scheme as parallel, we need to do this checking early.  Returns the
   union of the gang/worker/vector dimension masks used by STMT and all
   enclosing loops; diagnostics are only emitted on the outermost
   (STMT != NULL) invocation.  */

static unsigned
check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx)
{
  bool checking = true;
  unsigned outer_mask = 0;
  unsigned this_mask = 0;
  bool has_seq = false, has_auto = false;

  /* Gather the parallelism already claimed by enclosing loops.  */
  if (ctx->outer)
    outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer);
  if (!stmt)
    {
      checking = false;
      if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
	return outer_mask;
      stmt = as_a <gomp_for *> (ctx->stmt);
    }

  /* Collect this loop's own gang/worker/vector/seq/auto clauses.  */
  for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_GANG)
	this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WORKER)
	this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR)
	this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SEQ)
	has_seq = true;
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_AUTO)
	has_auto = true;
    }

  if (checking)
    {
      /* seq/auto are mutually exclusive with explicit parallelism, and
	 a loop may not reuse a dimension an enclosing loop claimed.  */
      if (has_seq && (this_mask || has_auto))
	error_at (gimple_location (stmt), "%<seq%> overrides other"
		  " OpenACC loop specifiers");
      else if (has_auto && this_mask)
	error_at (gimple_location (stmt), "%<auto%> conflicts with other"
		  " OpenACC loop specifiers");

      if (this_mask & outer_mask)
	error_at (gimple_location (stmt), "inner loop uses same"
		  " OpenACC parallelism as containing loop");
    }

  return outer_mask | this_mask;
}
/* Scan a GIMPLE_OMP_FOR: create its context, perform OpenACC clause
   checking/stripping where applicable, then scan the clauses, pre-body,
   loop controls and body.  Returns the new context.  */

static omp_context *
scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;
  tree clauses = gimple_omp_for_clauses (stmt);

  ctx = new_omp_context (stmt, outer_ctx);

  if (is_gimple_omp_oacc (stmt))
    {
      omp_context *tgt = enclosing_target_ctx (outer_ctx);

      /* In an OpenACC parallel region (or an orphaned loop), the
	 gang/worker/vector clauses may not carry arguments.  */
      if (!tgt || is_oacc_parallel (tgt))
	for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	  {
	    char const *check = NULL;

	    switch (OMP_CLAUSE_CODE (c))
	      {
	      case OMP_CLAUSE_GANG:
		check = "gang";
		break;

	      case OMP_CLAUSE_WORKER:
		check = "worker";
		break;

	      case OMP_CLAUSE_VECTOR:
		check = "vector";
		break;

	      default:
		break;
	      }

	    if (check && OMP_CLAUSE_OPERAND (c, 0))
	      error_at (gimple_location (stmt),
			"argument not permitted on %qs clause in"
			" OpenACC %<parallel%>", check);
	  }

      if (tgt && is_oacc_kernels (tgt))
	{
	  /* Strip out reductions, as they are not handled yet.  */
	  tree *prev_ptr = &clauses;

	  while (tree probe = *prev_ptr)
	    {
	      tree *next_ptr = &OMP_CLAUSE_CHAIN (probe);

	      /* Unlink reduction clauses in place.  */
	      if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION)
		*prev_ptr = *next_ptr;
	      else
		prev_ptr = next_ptr;
	    }

	  gimple_omp_for_set_clauses (stmt, clauses);
	  check_oacc_kernel_gwv (stmt, ctx);
	}
    }

  scan_sharing_clauses (clauses, ctx);

  scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  /* Scan the control operands of each collapsed loop dimension.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
  return ctx;
}
/* Duplicate #pragma omp simd, one for SIMT, another one for SIMD.
   The original STMT is replaced by a bind containing

     cond = GOMP_USE_SIMT ();
     if (cond) { copy of STMT with an added _SIMT_ clause } else { STMT }

   so the decision between the two lowerings is deferred to a later
   pass.  Both copies are then scanned, linked via simt_stmt.  */

static void
scan_omp_simd (gimple_stmt_iterator *gsi, gomp_for *stmt,
	       omp_context *outer_ctx)
{
  gbind *bind = gimple_build_bind (NULL, NULL, NULL);
  gsi_replace (gsi, bind, false);
  gimple_seq seq = NULL;
  /* cond = GOMP_USE_SIMT ();  */
  gimple *g = gimple_build_call_internal (IFN_GOMP_USE_SIMT, 0);
  tree cond = create_tmp_var_raw (integer_type_node);
  DECL_CONTEXT (cond) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (cond) = 1;
  gimple_bind_set_vars (bind, cond);
  gimple_call_set_lhs (g, cond);
  gimple_seq_add_stmt (&seq, g);
  tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
  tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
  tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
  /* if (cond != 0) goto lab1; else goto lab2;  */
  g = gimple_build_cond (NE_EXPR, cond, integer_zero_node, lab1, lab2);
  gimple_seq_add_stmt (&seq, g);
  g = gimple_build_label (lab1);
  gimple_seq_add_stmt (&seq, g);
  /* The SIMT copy of the loop, marked with an extra _SIMT_ clause.  */
  gimple_seq new_seq = copy_gimple_seq_and_replace_locals (stmt);
  gomp_for *new_stmt = as_a <gomp_for *> (new_seq);
  tree clause = build_omp_clause (gimple_location (stmt), OMP_CLAUSE__SIMT_);
  OMP_CLAUSE_CHAIN (clause) = gimple_omp_for_clauses (new_stmt);
  gimple_omp_for_set_clauses (new_stmt, clause);
  gimple_seq_add_stmt (&seq, new_stmt);
  g = gimple_build_goto (lab3);
  gimple_seq_add_stmt (&seq, g);
  g = gimple_build_label (lab2);
  gimple_seq_add_stmt (&seq, g);
  /* The original loop serves as the plain SIMD version.  */
  gimple_seq_add_stmt (&seq, stmt);
  g = gimple_build_label (lab3);
  gimple_seq_add_stmt (&seq, g);
  gimple_bind_set_body (bind, seq);
  update_stmt (bind);
  scan_omp_for (new_stmt, outer_ctx);
  /* Remember the SIMT variant on the SIMD variant's context.  */
  scan_omp_for (stmt, outer_ctx)->simt_stmt = new_stmt;
}
/* Scan an OpenMP sections directive: create its context, then scan
   the sharing clauses and the body.  */

static void
scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
{
  omp_context *sections_ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), sections_ctx);
  scan_omp (gimple_omp_body_ptr (stmt), sections_ctx);
}
/* Scan an OpenMP single directive.  Builds the .omp_copy_s record type
   used for copyprivate data, scans clauses and body, and lays the
   record out (or drops it if no fields were added).  */

static void
scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);

  /* Name the record type .omp_copy_s for dump readability.  */
  tree type_name = create_tmp_var_name (".omp_copy_s");
  type_name = build_decl (gimple_location (stmt),
			  TYPE_DECL, type_name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = type_name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Return true if the CLAUSES of an omp target guarantee that the base pointers
   used in the corresponding offloaded function are restrict.  */

static bool
omp_target_base_pointers_restrict_p (tree clauses)
{
  /* The analysis relies on the GOMP_MAP_FORCE_* mapping kinds, which are only
     used by OpenACC.  */
  if (flag_openacc == 0)
    return false;

  /* I.  Basic example:

       void foo (void)
       {
	 unsigned int a[2], b[2];

	 #pragma acc kernels \
	   copyout (a) \
	   copyout (b)
	 {
	   a[0] = 0;
	   b[0] = 1;
	 }
       }

     After gimplification, we have:

       #pragma omp target oacc_kernels \
	 map(force_from:a [len: 8]) \
	 map(force_from:b [len: 8])
       {
	 a[0] = 0;
	 b[0] = 1;
       }

     Because both mappings have the force prefix, we know that they will be
     allocated when calling the corresponding offloaded function, which means we
     can mark the base pointers for a and b in the offloaded function as
     restrict.  */

  /* Every clause must be a force-kind map; anything else defeats the
     guarantee.  */
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
	return false;

      enum gomp_map_kind kind = OMP_CLAUSE_MAP_KIND (c);
      if (kind != GOMP_MAP_FORCE_ALLOC
	  && kind != GOMP_MAP_FORCE_TO
	  && kind != GOMP_MAP_FORCE_FROM
	  && kind != GOMP_MAP_FORCE_TOFROM)
	return false;
    }

  return true;
}
/* Scan a GIMPLE_OMP_TARGET: create its context and .omp_data_t record
   type; for actual offload regions also create the child function and
   compute whether its base pointers may be marked restrict.  */

static void
scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  bool offloaded = is_gimple_omp_offloaded (stmt);
  tree clauses = gimple_omp_target_clauses (stmt);

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_t");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;

  bool base_pointers_restrict = false;
  if (offloaded)
    {
      create_omp_child_function (ctx, false);
      gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);

      base_pointers_restrict = omp_target_base_pointers_restrict_p (clauses);
      if (base_pointers_restrict
	  && dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Base pointers in offloaded function are restrict\n");
    }

  scan_sharing_clauses (clauses, ctx, base_pointers_restrict);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      /* scan_sharing_clauses prepends fields, so reverse the chain to
	 restore clause order before layout.  */
      TYPE_FIELDS (ctx->record_type)
	= nreverse (TYPE_FIELDS (ctx->record_type));
      if (flag_checking)
	{
	  /* All target record fields are expected to share the same
	     alignment; verify that under checking.  */
	  unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
	  for (tree field = TYPE_FIELDS (ctx->record_type);
	       field;
	       field = DECL_CHAIN (field))
	    gcc_assert (DECL_ALIGN (field) == align);
	}
      layout_type (ctx->record_type);
      if (offloaded)
	fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP teams directive: create its context, then scan the
   sharing clauses and the body.  */

static void
scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
{
  omp_context *teams_ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_teams_clauses (stmt), teams_ctx);
  scan_omp (gimple_omp_body_ptr (stmt), teams_ctx);
}
/* Check nesting restrictions. */
static bool
check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
{
tree c;
if (ctx && gimple_code (ctx->stmt) == GIMPLE_OMP_GRID_BODY)
/* GRID_BODY is an artificial construct, nesting rules will be checked in
the original copy of its contents. */
return true;
/* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
inside an OpenACC CTX. */
if (!(is_gimple_omp (stmt)
&& is_gimple_omp_oacc (stmt))
/* Except for atomic codes that we share with OpenMP. */
&& !(gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
{
if (oacc_get_fn_attrib (cfun->decl) != NULL)
{
error_at (gimple_location (stmt),
"non-OpenACC construct inside of OpenACC routine");
return false;
}
else
for (omp_context *octx = ctx; octx != NULL; octx = octx->outer)
if (is_gimple_omp (octx->stmt)
&& is_gimple_omp_oacc (octx->stmt))
{
error_at (gimple_location (stmt),
"non-OpenACC construct inside of OpenACC region");
return false;
}
}
if (ctx != NULL)
{
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
{
c = NULL_TREE;
if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
{
c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
if (omp_find_clause (c, OMP_CLAUSE_SIMD))
{
if (omp_find_clause (c, OMP_CLAUSE_THREADS)
&& (ctx->outer == NULL
|| !gimple_omp_for_combined_into_p (ctx->stmt)
|| gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR
|| (gimple_omp_for_kind (ctx->outer->stmt)
!= GF_OMP_FOR_KIND_FOR)
|| !gimple_omp_for_combined_p (ctx->outer->stmt)))
{
error_at (gimple_location (stmt),
"%<ordered simd threads%> must be closely "
"nested inside of %<for simd%> region");
return false;
}
return true;
}
}
error_at (gimple_location (stmt),
"OpenMP constructs other than %<#pragma omp ordered simd%>"
" may not be nested inside %<simd%> region");
return false;
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
{
if ((gimple_code (stmt) != GIMPLE_OMP_FOR
|| ((gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_DISTRIBUTE)
&& (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP)))
&& gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
{
error_at (gimple_location (stmt),
"only %<distribute%> or %<parallel%> regions are "
"allowed to be strictly nested inside %<teams%> "
"region");
return false;
}
}
}
switch (gimple_code (stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
return true;
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
{
error_at (gimple_location (stmt),
"%<distribute%> region must be strictly nested "
"inside %<teams%> construct");
return false;
}
return true;
}
/* We split taskloop into task and nested taskloop in it. */
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
return true;
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
{
bool ok = false;
if (ctx)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
ok = (gimple_omp_for_kind (ctx->stmt)
== GF_OMP_FOR_KIND_OACC_LOOP);
break;
case GIMPLE_OMP_TARGET:
switch (gimple_omp_target_kind (ctx->stmt))
{
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
ok = true;
break;
default:
break;
}
default:
break;
}
else if (oacc_get_fn_attrib (current_function_decl))
ok = true;
if (!ok)
{
error_at (gimple_location (stmt),
"OpenACC loop directive must be associated with"
" an OpenACC compute region");
return false;
}
}
/* FALLTHRU */
case GIMPLE_CALL:
if (is_gimple_call (stmt)
&& (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
|| DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCELLATION_POINT))
{
const char *bad = NULL;
const char *kind = NULL;
const char *construct
= (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL)
? "#pragma omp cancel"
: "#pragma omp cancellation point";
if (ctx == NULL)
{
error_at (gimple_location (stmt), "orphaned %qs construct",
construct);
return false;
}
switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
? tree_to_shwi (gimple_call_arg (stmt, 0))
: 0)
{
case 1:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
bad = "#pragma omp parallel";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
ctx->cancellable = true;
kind = "parallel";
break;
case 2:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
bad = "#pragma omp for";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
{
ctx->cancellable = true;
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel for%> inside "
"%<nowait%> for construct");
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel for%> inside "
"%<ordered%> for construct");
}
kind = "for";
break;
case 4:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
&& gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
bad = "#pragma omp sections";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
{
if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
{
ctx->cancellable = true;
if (omp_find_clause (gimple_omp_sections_clauses
(ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel sections%> inside "
"%<nowait%> sections construct");
}
else
{
gcc_assert (ctx->outer
&& gimple_code (ctx->outer->stmt)
== GIMPLE_OMP_SECTIONS);
ctx->outer->cancellable = true;
if (omp_find_clause (gimple_omp_sections_clauses
(ctx->outer->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel sections%> inside "
"%<nowait%> sections construct");
}
}
kind = "sections";
break;
case 8:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
bad = "#pragma omp task";
else
{
for (omp_context *octx = ctx->outer;
octx; octx = octx->outer)
{
switch (gimple_code (octx->stmt))
{
case GIMPLE_OMP_TASKGROUP:
break;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (octx->stmt)
!= GF_OMP_TARGET_KIND_REGION)
continue;
/* FALLTHRU */
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
error_at (gimple_location (stmt),
"%<%s taskgroup%> construct not closely "
"nested inside of %<taskgroup%> region",
construct);
return false;
default:
continue;
}
break;
}
ctx->cancellable = true;
}
kind = "taskgroup";
break;
default:
error_at (gimple_location (stmt), "invalid arguments");
return false;
}
if (bad)
{
error_at (gimple_location (stmt),
"%<%s %s%> construct not closely nested inside of %qs",
construct, kind, bad);
return false;
}
}
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
break;
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_CRITICAL:
if (is_gimple_call (stmt))
{
if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
!= BUILT_IN_GOMP_BARRIER)
return true;
error_at (gimple_location (stmt),
"barrier region may not be closely nested inside "
"of work-sharing, %<critical%>, %<ordered%>, "
"%<master%>, explicit %<task%> or %<taskloop%> "
"region");
return false;
}
error_at (gimple_location (stmt),
"work-sharing region may not be closely nested inside "
"of work-sharing, %<critical%>, %<ordered%>, "
"%<master%>, explicit %<task%> or %<taskloop%> region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_REGION)
return true;
break;
default:
break;
}
break;
case GIMPLE_OMP_MASTER:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
break;
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TASK:
error_at (gimple_location (stmt),
"%<master%> region may not be closely nested inside "
"of work-sharing, explicit %<task%> or %<taskloop%> "
"region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_REGION)
return true;
break;
default:
break;
}
break;
case GIMPLE_OMP_TASK:
for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
{
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend(%s)%> is only allowed in %<omp ordered%>",
kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
return false;
}
break;
case GIMPLE_OMP_ORDERED:
for (c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
c; c = OMP_CLAUSE_CHAIN (c))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
{
gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREADS
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMD);
continue;
}
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
if (kind == OMP_CLAUSE_DEPEND_SOURCE
|| kind == OMP_CLAUSE_DEPEND_SINK)
{
tree oclause;
/* Look for containing ordered(N) loop. */
if (ctx == NULL
|| gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| (oclause
= omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED)) == NULL_TREE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<ordered%> construct with %<depend%> clause "
"must be closely nested inside an %<ordered%> "
"loop");
return false;
}
else if (OMP_CLAUSE_ORDERED_EXPR (oclause) == NULL_TREE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<ordered%> construct with %<depend%> clause "
"must be closely nested inside a loop with "
"%<ordered%> clause with a parameter");
return false;
}
}
else
{
error_at (OMP_CLAUSE_LOCATION (c),
"invalid depend kind in omp %<ordered%> %<depend%>");
return false;
}
}
c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
if (omp_find_clause (c, OMP_CLAUSE_SIMD))
{
/* ordered simd must be closely nested inside of simd region,
and simd region must not encounter constructs other than
ordered simd, therefore ordered simd may be either orphaned,
or ctx->stmt must be simd. The latter case is handled already
earlier. */
if (ctx != NULL)
{
error_at (gimple_location (stmt),
"%<ordered%> %<simd%> must be closely nested inside "
"%<simd%> region");
return false;
}
}
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_ORDERED:
ordered_in_taskloop:
error_at (gimple_location (stmt),
"%<ordered%> region may not be closely nested inside "
"of %<critical%>, %<ordered%>, explicit %<task%> or "
"%<taskloop%> region");
return false;
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP)
goto ordered_in_taskloop;
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED) == NULL)
{
error_at (gimple_location (stmt),
"%<ordered%> region must be closely nested inside "
"a loop region with an %<ordered%> clause");
return false;
}
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
!= GF_OMP_TARGET_KIND_REGION)
break;
/* FALLTHRU */
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
error_at (gimple_location (stmt),
"%<ordered%> region must be closely nested inside "
"a loop region with an %<ordered%> clause");
return false;
default:
break;
}
break;
case GIMPLE_OMP_CRITICAL:
{
tree this_stmt_name
= gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
for (; ctx != NULL; ctx = ctx->outer)
if (gomp_critical *other_crit
= dyn_cast <gomp_critical *> (ctx->stmt))
if (this_stmt_name == gimple_omp_critical_name (other_crit))
{
error_at (gimple_location (stmt),
"%<critical%> region may not be nested inside "
"a %<critical%> region with the same name");
return false;
}
}
break;
case GIMPLE_OMP_TEAMS:
if (ctx == NULL
|| gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
|| gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
{
error_at (gimple_location (stmt),
"%<teams%> construct not closely nested inside of "
"%<target%> construct");
return false;
}
break;
case GIMPLE_OMP_TARGET:
for (c = gimple_omp_target_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
{
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend(%s)%> is only allowed in %<omp ordered%>",
kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
return false;
}
if (is_gimple_omp_offloaded (stmt)
&& oacc_get_fn_attrib (cfun->decl) != NULL)
{
error_at (gimple_location (stmt),
"OpenACC region inside of OpenACC routine, nested "
"parallelism not supported yet");
return false;
}
for (; ctx != NULL; ctx = ctx->outer)
{
if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
{
if (is_gimple_omp (stmt)
&& is_gimple_omp_oacc (stmt)
&& is_gimple_omp (ctx->stmt))
{
error_at (gimple_location (stmt),
"OpenACC construct inside of non-OpenACC region");
return false;
}
continue;
}
const char *stmt_name, *ctx_stmt_name;
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
case GF_OMP_TARGET_KIND_ENTER_DATA:
stmt_name = "target enter data"; break;
case GF_OMP_TARGET_KIND_EXIT_DATA:
stmt_name = "target exit data"; break;
case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
stmt_name = "enter/exit data"; break;
case GF_OMP_TARGET_KIND_OACC_HOST_DATA: stmt_name = "host_data";
break;
default: gcc_unreachable ();
}
switch (gimple_omp_target_kind (ctx->stmt))
{
case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
ctx_stmt_name = "parallel"; break;
case GF_OMP_TARGET_KIND_OACC_KERNELS:
ctx_stmt_name = "kernels"; break;
case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
ctx_stmt_name = "host_data"; break;
default: gcc_unreachable ();
}
/* OpenACC/OpenMP mismatch? */
if (is_gimple_omp_oacc (stmt)
!= is_gimple_omp_oacc (ctx->stmt))
{
error_at (gimple_location (stmt),
"%s %qs construct inside of %s %qs region",
(is_gimple_omp_oacc (stmt)
? "OpenACC" : "OpenMP"), stmt_name,
(is_gimple_omp_oacc (ctx->stmt)
? "OpenACC" : "OpenMP"), ctx_stmt_name);
return false;
}
if (is_gimple_omp_offloaded (ctx->stmt))
{
/* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
if (is_gimple_omp_oacc (ctx->stmt))
{
error_at (gimple_location (stmt),
"%qs construct inside of %qs region",
stmt_name, ctx_stmt_name);
return false;
}
else
{
warning_at (gimple_location (stmt), 0,
"%qs construct inside of %qs region",
stmt_name, ctx_stmt_name);
}
}
}
break;
default:
break;
}
return true;
}
/* Helper function for scan_omp.
   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OMP directives in TP.  */
static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	{
	  /* Replace the decl with its remapped copy for this context.  */
	  tree repl = remap_decl (t, &ctx->cb);
	  gcc_checking_assert (TREE_CODE (repl) != ERROR_MARK);
	  *tp = repl;
	}
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      /* If the operand's type was remapped, integer constants are
		 rebuilt in the new type (they are shared and must not be
		 modified in place); other expressions get the remapped
		 type assigned directly.  */
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = wide_int_to_tree (tem, t);
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}
/* Return true if FNDECL is a setjmp or a longjmp. */
static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
  /* Recognize the standard builtins directly.  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      if (fcode == BUILT_IN_SETJMP || fcode == BUILT_IN_LONGJMP)
	return true;
    }

  /* Otherwise fall back to comparing the declaration's name.  */
  tree declname = DECL_NAME (fndecl);
  if (declname == NULL_TREE)
    return false;
  const char *name = IDENTIFIER_POINTER (declname);
  return strcmp (name, "setjmp") == 0 || strcmp (name, "longjmp") == 0;
}
/* Helper function for scan_omp.
Callback for walk_gimple_stmt used to scan for OMP directives in
the current statement in GSI. */
static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the nesting restrictions.  */
  bool remove = false;
  if (is_gimple_omp (stmt))
    remove = !check_omp_nesting_restrictions (stmt, ctx);
  else if (is_gimple_call (stmt))
    {
      tree fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
	{
	  /* setjmp/longjmp cannot be supported inside a simd loop.  */
	  if (setjmp_or_longjmp_p (fndecl)
	      && ctx
	      && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	      && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	    {
	      remove = true;
	      error_at (gimple_location (stmt),
			"setjmp/longjmp inside simd construct");
	    }
	  /* GOMP runtime calls are subject to the same nesting
	     restrictions as the corresponding directives.  */
	  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (fndecl))
	      {
	      case BUILT_IN_GOMP_BARRIER:
	      case BUILT_IN_GOMP_CANCEL:
	      case BUILT_IN_GOMP_CANCELLATION_POINT:
	      case BUILT_IN_GOMP_TASKYIELD:
	      case BUILT_IN_GOMP_TASKWAIT:
	      case BUILT_IN_GOMP_TASKGROUP_START:
	      case BUILT_IN_GOMP_TASKGROUP_END:
		remove = !check_omp_nesting_restrictions (stmt, ctx);
		break;
	      default:
		break;
	      }
	}
    }
  if (remove)
    {
      /* Invalidly nested statements are replaced by a no-op so the
	 rest of the pass never sees them.  */
      stmt = gimple_build_nop ();
      gsi_replace (gsi, stmt, false);
    }

  /* Dispatch on the statement kind; scan_omp_* helpers create nested
     contexts and recurse into the directive bodies themselves.  */
  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      /* Simd loops that may be offloaded to SIMT hardware get the
	 special SIMT scan when a SIMT vectorization factor exists.  */
      if (((gimple_omp_for_kind (as_a <gomp_for *> (stmt))
	    & GF_OMP_FOR_KIND_MASK) == GF_OMP_FOR_KIND_SIMD)
	  && omp_maybe_offloaded_ctx (ctx)
	  && omp_max_simt_vf ())
	scan_omp_simd (gsi, as_a <gomp_for *> (stmt), ctx);
      else
	scan_omp_for (as_a <gomp_for *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (as_a <gomp_single *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_GRID_BODY:
      /* These directives need no clause processing, just a fresh
	 context for their body.  */
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_OMP_TARGET:
      scan_omp_target (as_a <gomp_target *> (stmt), ctx);
      break;

    case GIMPLE_OMP_TEAMS:
      scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;
	*handled_ops_p = false;
	if (ctx)
	  /* Map bind-local vars to themselves so remapping in
	     scan_omp_1_op leaves them alone.  */
	  for (var = gimple_bind_vars (as_a <gbind *> (stmt));
	       var ;
	       var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement. CTX
contains context information about the OMP directives and
clauses found during the scan. */
static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  /* Walk BODY_P with the statement and operand callbacks; restore
     input_location afterwards as the callbacks clobber it.  */
  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  location_t old_loc = input_location;
  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = old_loc;
}
/* Re-gimplification and code generation routines. */
/* Remove omp_member_access_dummy_var variables from gimple_bind_vars
of BIND if in a method. */
static void
maybe_remove_omp_member_access_dummy_vars (gbind *bind)
{
  /* Only act in a method: the first argument must be an artificial
     pointer (the "this" parameter).  */
  tree first_arg = DECL_ARGUMENTS (current_function_decl);
  if (first_arg == NULL_TREE
      || !DECL_ARTIFICIAL (first_arg)
      || TREE_CODE (TREE_TYPE (first_arg)) != POINTER_TYPE)
    return;

  /* Unlink every member-access dummy variable from the bind's chain.  */
  tree vars = gimple_bind_vars (bind);
  for (tree *slot = &vars; *slot;)
    {
      if (omp_member_access_dummy_var (*slot))
	*slot = DECL_CHAIN (*slot);
      else
	slot = &DECL_CHAIN (*slot);
    }
  gimple_bind_set_vars (bind, vars);
}
/* Remove omp_member_access_dummy_var variables from BLOCK_VARS of
block and its subblocks. */
static void
remove_member_access_dummy_vars (tree block)
{
  /* Unlink dummy variables from this block's variable chain.  */
  tree *slot = &BLOCK_VARS (block);
  while (*slot)
    {
      if (omp_member_access_dummy_var (*slot))
	*slot = DECL_CHAIN (*slot);
      else
	slot = &DECL_CHAIN (*slot);
    }

  /* Recurse into every subblock.  */
  for (tree sub = BLOCK_SUBBLOCKS (block); sub; sub = BLOCK_CHAIN (sub))
    remove_member_access_dummy_vars (sub);
}
/* If a context was created for STMT when it was scanned, return it. */
static omp_context *
maybe_lookup_ctx (gimple *stmt)
{
  /* Contexts are keyed by their statement in the all_contexts tree.  */
  splay_tree_node n
    = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  if (n == NULL)
    return NULL;
  return (omp_context *) n->value;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
context that has a mapping for DECL.
If CTX is a nested parallel directive, we may have to use the decl
mappings created in CTX's parent context. Suppose that we have the
following parallel nesting (variable UIDs showed for clarity):
iD.1562 = 0;
#omp parallel shared(iD.1562) -> outer parallel
iD.1562 = iD.1562 + 1;
#omp parallel shared (iD.1562) -> inner parallel
iD.1562 = iD.1562 - 1;
Each parallel structure will create a distinct .omp_data_s structure
for copying iD.1562 in/out of the directive:
outer parallel .omp_data_s.1.i -> iD.1562
inner parallel .omp_data_s.2.i -> iD.1562
A shared variable mapping will produce a copy-out operation before
the parallel directive and a copy-in operation after it. So, in
this case we would have:
iD.1562 = 0;
.omp_data_o.1.i = iD.1562;
#omp parallel shared(iD.1562) -> outer parallel
.omp_data_i.1 = &.omp_data_o.1
.omp_data_i.1->i = .omp_data_i.1->i + 1;
.omp_data_o.2.i = iD.1562; -> **
#omp parallel shared(iD.1562) -> inner parallel
.omp_data_i.2 = &.omp_data_o.2
.omp_data_i.2->i = .omp_data_i.2->i - 1;
** This is a problem. The symbol iD.1562 cannot be referenced
inside the body of the outer parallel region. But since we are
emitting this copy operation while expanding the inner parallel
directive, we need to access the CTX structure of the outer
parallel directive to get the correct mapping:
.omp_data_o.2.i = .omp_data_i.1->i
Since there may be other workshare or parallel directives enclosing
the parallel directive, it may be necessary to walk up the context
parent chain. This is not a problem in general because nested
parallelism happens only rarely. */
static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  /* Walk the enclosing contexts from innermost outward until one of
     them has a mapping for DECL.  */
  tree t = NULL_TREE;
  for (omp_context *up = ctx->outer; up && !t; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  /* A nested region must either find a mapping or DECL is global.  */
  gcc_assert (!ctx->is_nested || t || is_global_var (decl));
  return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
in outer contexts. */
static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  /* Search the enclosing contexts from innermost outward for a mapping
     of DECL, stopping at the first hit.  (The original declared T and
     then redundantly re-initialized it in the for-init; initialize it
     exactly once.)  */
  tree t = NULL_TREE;
  for (omp_context *up = ctx->outer; up && t == NULL_TREE; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  /* Unlike lookup_decl_in_outer_ctx, a missing mapping is fine here;
     just hand back DECL itself.  */
  return t ? t : decl;
}
/* Construct the initialization value for reduction operation OP. */
tree
omp_reduction_init_op (location_t loc, enum tree_code op, tree type)
{
  switch (op)
    {
    /* Additive, disjunctive and inequality reductions start at
       zero/false.  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    /* Multiplicative, conjunctive and equality reductions start at
       one/true.  */
    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    /* Bitwise AND starts at all-ones.  */
    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      /* MAX starts at the smallest representable value of TYPE.  */
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (type))
	    {
	      /* -inf when infinities are honored ...  */
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    /* ... otherwise the largest finite value with the sign
	       bit set (sign argument 1).  */
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else if (POINTER_TYPE_P (type))
	{
	  wide_int min
	    = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
	  return wide_int_to_tree (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      /* MIN starts at the largest representable value of TYPE.  */
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (type))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else if (POINTER_TYPE_P (type))
	{
	  wide_int max
	    = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
	  return wide_int_to_tree (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
/* Construct the initialization value for reduction CLAUSE. */
tree
omp_reduction_init (tree clause, tree type)
{
  /* Delegate to omp_reduction_init_op, using the clause's own
     location and reduction code.  */
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (clause);
  return omp_reduction_init_op (loc, code, type);
}
/* Return alignment to be assumed for var in CLAUSE, which should be
OMP_CLAUSE_ALIGNED. */
static tree
omp_clause_aligned_alignment (tree clause)
{
  /* An explicitly specified alignment wins.  */
  if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
    return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);

  /* Otherwise return implementation defined alignment.  */
  unsigned int al = 1;
  machine_mode mode, vmode;
  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    /* Round down to a power of two.  */
    vs = 1 << floor_log2 (vs);
  /* Pairs of (scalar class, corresponding vector class).  */
  static enum mode_class classes[]
    = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
  for (int i = 0; i < 4; i += 2)
    for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
	 mode != VOIDmode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	/* Ask the target for its preferred vector mode for this
	   scalar mode; skip it if it is not actually a vector.  */
	vmode = targetm.vectorize.preferred_simd_mode (mode);
	if (GET_MODE_CLASS (vmode) != classes[i + 1])
	  continue;
	/* Widen the vector mode up to the autovectorization size.  */
	while (vs
	       && GET_MODE_SIZE (vmode) < vs
	       && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
	  vmode = GET_MODE_2XWIDER_MODE (vmode);
	/* Build the corresponding vector type and make sure it really
	   maps back to the modes we picked.  */
	tree type = lang_hooks.types.type_for_mode (mode, 1);
	if (type == NULL_TREE || TYPE_MODE (type) != mode)
	  continue;
	type = build_vector_type (type, GET_MODE_SIZE (vmode)
				  / GET_MODE_SIZE (mode));
	if (TYPE_MODE (type) != vmode)
	  continue;
	/* Track the largest alignment among all usable vector types.  */
	if (TYPE_ALIGN_UNIT (type) > al)
	  al = TYPE_ALIGN_UNIT (type);
      }
  return build_int_cst (integer_type_node, al);
}
/* This structure is part of the interface between lower_rec_simd_input_clauses
and lower_rec_input_clauses. */
struct omplow_simd_context {
  /* Index variable used for ARRAY_REFs into "omp simd array"
     privatized copies inside the loop body.  */
  tree idx;
  /* Lane variable used for ARRAY_REFs into the same arrays outside
     the vectorized part of the loop.  */
  tree lane;
  /* Addresses of SIMT-privatized variables, collected as extra
     arguments (plus a leading simduid placeholder).  */
  vec<tree, va_heap> simt_eargs;
  /* Statements clobbering SIMT-privatized variables (destructor-side
     sequence).  */
  gimple_seq simt_dlist;
  /* Maximum vectorization factor; 0 means not yet computed, 1 means
     no simd privatization is performed.  */
  int max_vf;
  /* True when lowering for SIMT execution rather than plain simd.  */
  bool is_simt;
};
/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
privatization. */
static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx,
			      omplow_simd_context *sctx, tree &ivar, tree &lvar)
{
  /* Lazily compute the maximum vectorization factor on first use.  */
  if (sctx->max_vf == 0)
    {
      sctx->max_vf = sctx->is_simt ? omp_max_simt_vf () : omp_max_vf ();
      if (sctx->max_vf > 1)
	{
	  /* A safelen clause caps max_vf; a non-constant or
	     non-positive safelen disables simd privatization.  */
	  tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				    OMP_CLAUSE_SAFELEN);
	  if (c
	      && (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST
		  || tree_int_cst_sgn (OMP_CLAUSE_SAFELEN_EXPR (c)) != 1))
	    sctx->max_vf = 1;
	  else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
					  sctx->max_vf) == -1)
	    sctx->max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
	}
      if (sctx->max_vf > 1)
	{
	  /* Shared index and lane variables for all privatized
	     copies in this loop.  */
	  sctx->idx = create_tmp_var (unsigned_type_node);
	  sctx->lane = create_tmp_var (unsigned_type_node);
	}
    }
  if (sctx->max_vf == 1)
    return false;

  if (sctx->is_simt)
    {
      /* SIMT: registers are privatized per-thread already.  */
      if (is_gimple_reg (new_var))
	{
	  ivar = lvar = new_var;
	  return true;
	}
      /* Addressable vars get a marked temporary whose address is
	 passed to the SIMT region.  */
      tree type = TREE_TYPE (new_var), ptype = build_pointer_type (type);
      ivar = lvar = create_tmp_var (type);
      TREE_ADDRESSABLE (ivar) = 1;
      DECL_ATTRIBUTES (ivar) = tree_cons (get_identifier ("omp simt private"),
					  NULL, DECL_ATTRIBUTES (ivar));
      sctx->simt_eargs.safe_push (build1 (ADDR_EXPR, ptype, ivar));
      /* Clobber the privatized copy at the end of the region.  */
      tree clobber = build_constructor (type, NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gimple *g = gimple_build_assign (ivar, clobber);
      gimple_seq_add_stmt (&sctx->simt_dlist, g);
    }
  else
    {
      /* Plain simd: privatize into a max_vf-element "omp simd array";
	 IVAR indexes it by sctx->idx, LVAR by sctx->lane.  */
      tree atype = build_array_type_nelts (TREE_TYPE (new_var), sctx->max_vf);
      tree avar = create_tmp_var_raw (atype);
      if (TREE_ADDRESSABLE (new_var))
	TREE_ADDRESSABLE (avar) = 1;
      DECL_ATTRIBUTES (avar)
	= tree_cons (get_identifier ("omp simd array"), NULL,
		     DECL_ATTRIBUTES (avar));
      gimple_add_tmp_var (avar);
      ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->idx,
		     NULL_TREE, NULL_TREE);
      lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->lane,
		     NULL_TREE, NULL_TREE);
    }
  /* Redirect uses of NEW_VAR through the lane access.  */
  if (DECL_P (new_var))
    {
      SET_DECL_VALUE_EXPR (new_var, lvar);
      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
    }
  return true;
}
/* Helper function of lower_rec_input_clauses. For a reference
in simd reduction, add an underlying variable it will reference. */
static void
handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
{
  /* NEW_VARD is a pointer/reference; only constant-sized referenced
     types get a backing temporary.  */
  tree reftype = TREE_TYPE (TREE_TYPE (new_vard));
  if (!TREE_CONSTANT (TYPE_SIZE_UNIT (reftype)))
    return;

  tree backing = create_tmp_var_raw (reftype, get_name (new_vard));
  gimple_add_tmp_var (backing);
  TREE_ADDRESSABLE (backing) = 1;
  tree addr = build_fold_addr_expr_loc (loc, backing);
  gimplify_assign (new_vard, addr, ilist);
}
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
from the receiver (aka child) side and initializers for REFERENCE_TYPE
private variables. Initialization statements go in ILIST, while calls
to destructors go in DLIST. */
static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
omp_context *ctx, struct omp_for_data *fd)
{
tree c, dtor, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
bool reduction_omp_orig_ref = false;
int pass;
bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
omplow_simd_context sctx = omplow_simd_context ();
tree simt_lane = NULL_TREE, simtrec = NULL_TREE;
tree ivar = NULL_TREE, lvar = NULL_TREE, uid = NULL_TREE;
gimple_seq llist[3] = { };
copyin_seq = NULL;
sctx.is_simt = is_simd && omp_find_clause (clauses, OMP_CLAUSE__SIMT_);
/* Set max_vf=1 (which will later enforce safelen=1) in simd loops
with data sharing clauses referencing variable sized vars. That
is unnecessarily hard to support and very unlikely to result in
vectorized code anyway. */
if (is_simd)
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LINEAR:
if (OMP_CLAUSE_LINEAR_ARRAY (c))
sctx.max_vf = 1;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
if (is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
break;
case OMP_CLAUSE_REDUCTION:
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
|| is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
break;
default:
continue;
}
/* Add a placeholder for simduid. */
if (sctx.is_simt && sctx.max_vf != 1)
sctx.simt_eargs.safe_push (NULL_TREE);
/* Do all the fixed sized types in the first pass, and the variable sized
types in the second pass. This makes sure that the scalar arguments to
the variable sized types are processed before we use them in the
variable sized operations. */
for (pass = 0; pass < 2; ++pass)
{
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
tree var, new_var;
bool by_ref;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
switch (c_kind)
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_DEBUG (c))
continue;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
continue;
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
gcc_assert (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)
|| is_global_var (OMP_CLAUSE_DECL (c)));
continue;
}
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
break;
case OMP_CLAUSE_LINEAR:
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
reduction_omp_orig_ref = true;
break;
case OMP_CLAUSE__LOOPTEMP_:
/* Handle _looptemp_ clauses only on parallel/task. */
if (fd)
continue;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
{
lastprivate_firstprivate = true;
if (pass != 0 || is_taskloop_ctx (ctx))
continue;
}
/* Even without corresponding firstprivate, if
decl is Fortran allocatable, it needs outer var
reference. */
else if (pass == 0
&& lang_hooks.decls.omp_private_outer_ref
(OMP_CLAUSE_DECL (c)))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_ALIGNED:
if (pass == 0)
continue;
var = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
&& !is_global_var (var))
{
new_var = maybe_lookup_decl (var, ctx);
if (new_var == NULL_TREE)
new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
x = build_call_expr_loc (clause_loc, x, 2, new_var, alarg);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
gimplify_and_add (x, ilist);
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (var))
{
tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
new_var = lookup_decl (var, ctx);
t = maybe_lookup_decl_in_outer_ctx (var, ctx);
t = build_fold_addr_expr_loc (clause_loc, t);
t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
t = build_call_expr_loc (clause_loc, t2, 2, t, alarg);
t = fold_convert_loc (clause_loc, ptype, t);
x = create_tmp_var (ptype);
t = build2 (MODIFY_EXPR, ptype, x, t);
gimplify_and_add (t, ilist);
t = build_simple_mem_ref_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (new_var, t);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
default:
continue;
}
new_var = var = OMP_CLAUSE_DECL (c);
if (c_kind == OMP_CLAUSE_REDUCTION && TREE_CODE (var) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == INDIRECT_REF
|| TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
new_var = var;
}
if (c_kind != OMP_CLAUSE_COPYIN)
new_var = lookup_decl (var, ctx);
if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
{
if (pass != 0)
continue;
}
/* C/C++ array section reductions. */
else if (c_kind == OMP_CLAUSE_REDUCTION
&& var != OMP_CLAUSE_DECL (c))
{
if (pass == 0)
continue;
tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl (b, ctx);
if (b == NULL)
{
b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
}
if (integer_zerop (bias))
bias = b;
else
{
bias = fold_convert_loc (clause_loc,
TREE_TYPE (b), bias);
bias = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (b), b, bias);
}
orig_var = TREE_OPERAND (orig_var, 0);
}
if (TREE_CODE (orig_var) == INDIRECT_REF
|| TREE_CODE (orig_var) == ADDR_EXPR)
orig_var = TREE_OPERAND (orig_var, 0);
tree d = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (d);
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
const char *name = get_name (orig_var);
if (TREE_CONSTANT (v))
{
x = create_tmp_var_raw (type, name);
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, ilist, NULL, is_gimple_val, fb_rvalue);
t = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (v), v,
build_int_cst (TREE_TYPE (v), 1));
t = fold_build2_loc (clause_loc, MULT_EXPR,
TREE_TYPE (v), t,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
tree al = size_int (TYPE_ALIGN (TREE_TYPE (type)));
x = build_call_expr_loc (clause_loc, atmp, 2, t, al);
}
tree ptype = build_pointer_type (TREE_TYPE (type));
x = fold_convert_loc (clause_loc, ptype, x);
tree y = create_tmp_var (ptype, name);
gimplify_assign (y, x, ilist);
x = y;
tree yb = y;
if (!integer_zerop (bias))
{
bias = fold_convert_loc (clause_loc, pointer_sized_int_node,
bias);
yb = fold_convert_loc (clause_loc, pointer_sized_int_node,
x);
yb = fold_build2_loc (clause_loc, MINUS_EXPR,
pointer_sized_int_node, yb, bias);
x = fold_convert_loc (clause_loc, TREE_TYPE (x), yb);
yb = create_tmp_var (ptype, name);
gimplify_assign (yb, x, ilist);
x = yb;
}
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == POINTER_PLUS_EXPR)
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == ADDR_EXPR)
{
if (orig_var != var)
{
gcc_assert (is_variable_sized (orig_var));
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var),
x);
gimplify_assign (new_var, x, ilist);
tree new_orig_var = lookup_decl (orig_var, ctx);
tree t = build_fold_indirect_ref (new_var);
DECL_IGNORED_P (new_var) = 0;
TREE_THIS_NOTRAP (t);
SET_DECL_VALUE_EXPR (new_orig_var, t);
DECL_HAS_VALUE_EXPR_P (new_orig_var) = 1;
}
else
{
x = build2 (MEM_REF, TREE_TYPE (new_var), x,
build_int_cst (ptype, 0));
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
}
else
{
gcc_assert (orig_var == var);
if (TREE_CODE (d) == INDIRECT_REF)
{
x = create_tmp_var (ptype, name);
TREE_ADDRESSABLE (x) = 1;
gimplify_assign (x, yb, ilist);
x = build_fold_addr_expr_loc (clause_loc, x);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
tree y1 = create_tmp_var (ptype, NULL);
gimplify_assign (y1, y, ilist);
tree i2 = NULL_TREE, y2 = NULL_TREE;
tree body2 = NULL_TREE, end2 = NULL_TREE;
tree y3 = NULL_TREE, y4 = NULL_TREE;
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
{
y2 = create_tmp_var (ptype, NULL);
gimplify_assign (y2, y, ilist);
tree ref = build_outer_var_ref (var, ctx);
/* For ref build_outer_var_ref already performs this. */
if (TREE_CODE (d) == INDIRECT_REF)
gcc_assert (omp_is_reference (var));
else if (TREE_CODE (d) == ADDR_EXPR)
ref = build_fold_addr_expr (ref);
else if (omp_is_reference (var))
ref = build_fold_addr_expr (ref);
ref = fold_convert_loc (clause_loc, ptype, ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
&& OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
y3 = create_tmp_var (ptype, NULL);
gimplify_assign (y3, unshare_expr (ref), ilist);
}
if (is_simd)
{
y4 = create_tmp_var (ptype, NULL);
gimplify_assign (y4, ref, dlist);
}
}
tree i = create_tmp_var (TREE_TYPE (v), NULL);
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), ilist);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (ilist, gimple_build_label (body));
if (y2)
{
i2 = create_tmp_var (TREE_TYPE (v), NULL);
gimplify_assign (i2, build_int_cst (TREE_TYPE (v), 0), dlist);
body2 = create_artificial_label (UNKNOWN_LOCATION);
end2 = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_label (body2));
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y1));
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
SET_DECL_VALUE_EXPR (placeholder,
y3 ? build_simple_mem_ref (y3)
: error_mark_node);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
x = lang_hooks.decls.omp_clause_default_ctor
(c, build_simple_mem_ref (y1),
y3 ? build_simple_mem_ref (y3) : NULL_TREE);
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y2));
SET_DECL_VALUE_EXPR (placeholder,
build_simple_mem_ref (y4));
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 0;
x = lang_hooks.decls.omp_clause_dtor
(c, build_simple_mem_ref (y2));
if (x)
{
gimple_seq tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (dlist, tseq);
}
}
else
{
x = omp_reduction_init (c, TREE_TYPE (type));
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
gimplify_assign (build_simple_mem_ref (y1), x, ilist);
if (is_simd)
{
x = build2 (code, TREE_TYPE (type),
build_simple_mem_ref (y4),
build_simple_mem_ref (y2));
gimplify_assign (build_simple_mem_ref (y4), x, dlist);
}
}
gimple *g
= gimple_build_assign (y1, POINTER_PLUS_EXPR, y1,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
if (y3)
{
g = gimple_build_assign (y3, POINTER_PLUS_EXPR, y3,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
}
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (ilist, g);
g = gimple_build_cond (LE_EXPR, i, v, body, end);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist, gimple_build_label (end));
if (y2)
{
g = gimple_build_assign (y2, POINTER_PLUS_EXPR, y2,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
if (y4)
{
g = gimple_build_assign
(y4, POINTER_PLUS_EXPR, y4,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
}
g = gimple_build_assign (i2, PLUS_EXPR, i2,
build_int_cst (TREE_TYPE (i2), 1));
gimple_seq_add_stmt (dlist, g);
g = gimple_build_cond (LE_EXPR, i2, v, body2, end2);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end2));
}
continue;
}
else if (is_variable_sized (var))
{
/* For variable sized types, we need to allocate the
actual storage here. Call alloca and store the
result in the pointer decl that we created elsewhere. */
if (pass == 0)
continue;
if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
{
gcall *stmt;
tree tmp, atmp;
ptr = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
ptr = TREE_OPERAND (ptr, 0);
gcc_assert (DECL_P (ptr));
x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
/* void *tmp = __builtin_alloca */
atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
stmt = gimple_build_call (atmp, 2, x,
size_int (DECL_ALIGN (var)));
tmp = create_tmp_var_raw (ptr_type_node);
gimple_add_tmp_var (tmp);
gimple_call_set_lhs (stmt, tmp);
gimple_seq_add_stmt (ilist, stmt);
x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
gimplify_assign (ptr, x, ilist);
}
}
else if (omp_is_reference (var))
{
/* For references that are being privatized for Fortran,
allocate new backing storage for the new pointer
variable. This allows us to avoid changing all the
code that expects a pointer to something that expects
a direct variable. */
if (pass == 0)
continue;
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
{
x = build_receiver_ref (var, false, ctx);
x = build_fold_addr_expr_loc (clause_loc, x);
}
else if (TREE_CONSTANT (x))
{
/* For reduction in SIMD loop, defer adding the
initialization of the reference, because if we decide
to use SIMD array for it, the initilization could cause
expansion ICE. */
if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
x = NULL_TREE;
else
{
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
}
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree rtype = TREE_TYPE (TREE_TYPE (new_var));
tree al = size_int (TYPE_ALIGN (rtype));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
}
if (x)
{
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
else if (c_kind == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
if (pass == 0)
continue;
}
else if (pass != 0)
continue;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
continue;
/* Shared global vars are just accessed directly. */
if (is_global_var (new_var))
break;
/* For taskloop firstprivate/lastprivate, represented
as firstprivate and shared clause on the task, new_var
is the firstprivate var. */
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
break;
/* Set up the DECL_VALUE_EXPR for shared variables now. This
needs to be delayed until after fixup_child_record_type so
that we get the correct type during the dereference. */
by_ref = use_pointer_for_field (var, ctx);
x = build_receiver_ref (var, by_ref, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
/* ??? If VAR is not passed by reference, and the variable
hasn't been initialized yet, then we'll get a warning for
the store into the omp_data_s structure. Ideally, we'd be
able to notice this and not store anything at all, but
we're generating code too early. Suppress the warning. */
if (!by_ref)
TREE_NO_WARNING (var) = 1;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
x = build_outer_var_ref (var, ctx);
else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
{
if (is_task_ctx (ctx))
x = build_receiver_ref (var, false, ctx);
else
x = build_outer_var_ref (var, ctx, OMP_CLAUSE_PRIVATE);
}
else
x = NULL;
do_private:
tree nx;
nx = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (new_var), x);
if (is_simd)
{
tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
if ((TREE_ADDRESSABLE (new_var) || nx || y
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (nx)
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar), x);
if (nx && x)
gimplify_and_add (x, &llist[0]);
if (y)
{
y = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (y)
{
gimple_seq tseq = NULL;
dtor = y;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (&llist[1], tseq);
}
}
break;
}
}
if (nx)
gimplify_and_add (nx, ilist);
/* FALLTHRU */
do_dtor:
x = lang_hooks.decls.omp_clause_dtor (c, new_var);
if (x)
{
gimple_seq tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (dlist, tseq);
}
break;
case OMP_CLAUSE_LINEAR:
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
goto do_firstprivate;
if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
x = NULL;
else
x = build_outer_var_ref (var, ctx);
goto do_private;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_task_ctx (ctx))
{
if (omp_is_reference (var) || is_variable_sized (var))
goto do_dtor;
else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
|| use_pointer_for_field (var, NULL))
{
x = build_receiver_ref (var, false, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
goto do_dtor;
}
}
do_firstprivate:
x = build_outer_var_ref (var, ctx);
if (is_simd)
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& gimple_omp_for_combined_into_p (ctx->stmt))
{
tree t = OMP_CLAUSE_LINEAR_STEP (c);
tree stept = TREE_TYPE (t);
tree ct = omp_find_clause (clauses,
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (ct);
tree l = OMP_CLAUSE_DECL (ct);
tree n1 = fd->loop.n1;
tree step = fd->loop.step;
tree itype = TREE_TYPE (l);
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
l = fold_build2 (MINUS_EXPR, itype, l, n1);
if (TYPE_UNSIGNED (itype)
&& fd->loop.cond_code == GT_EXPR)
l = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, l),
fold_build1 (NEGATE_EXPR,
itype, step));
else
l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
t = fold_build2 (MULT_EXPR, stept,
fold_convert (stept, l), t);
if (OMP_CLAUSE_LINEAR_ARRAY (c))
{
x = lang_hooks.decls.omp_clause_linear_ctor
(c, new_var, x, t);
gimplify_and_add (x, ilist);
goto do_dtor;
}
if (POINTER_TYPE_P (TREE_TYPE (x)))
x = fold_build2 (POINTER_PLUS_EXPR,
TREE_TYPE (x), x, t);
else
x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
}
if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
|| TREE_ADDRESSABLE (new_var))
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
{
tree iv = create_tmp_var (TREE_TYPE (new_var));
x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
gimplify_and_add (x, ilist);
gimple_stmt_iterator gsi
= gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gassign *g
= gimple_build_assign (unshare_expr (lvar), iv);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
tree t = OMP_CLAUSE_LINEAR_STEP (c);
enum tree_code code = PLUS_EXPR;
if (POINTER_TYPE_P (TREE_TYPE (new_var)))
code = POINTER_PLUS_EXPR;
g = gimple_build_assign (iv, code, iv, t);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
break;
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (ivar), x);
gimplify_and_add (x, &llist[0]);
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
{
gimple_seq tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (&llist[1], tseq);
}
break;
}
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (new_var), x);
gimplify_and_add (x, ilist);
goto do_dtor;
case OMP_CLAUSE__LOOPTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
x = build_outer_var_ref (var, ctx);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
gimplify_and_add (x, ilist);
break;
case OMP_CLAUSE_COPYIN:
by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
append_to_statement_list (x, ©in_seq);
copyin_by_ref |= by_ref;
break;
case OMP_CLAUSE_REDUCTION:
/* OpenACC reductions are initialized using the
GOACC_REDUCTION internal function. */
if (is_gimple_omp_oacc (ctx->stmt))
break;
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
gimple *tseq;
x = build_outer_var_ref (var, ctx);
if (omp_is_reference (var)
&& !useless_type_conversion_p (TREE_TYPE (placeholder),
TREE_TYPE (x)))
x = build_fold_addr_expr_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
tree new_vard = new_var;
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (new_vard == new_var)
{
gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
SET_DECL_VALUE_EXPR (new_var, ivar);
}
else
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (ivar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar),
build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, &llist[0]);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[0], tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[1], tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (new_vard == new_var)
SET_DECL_VALUE_EXPR (new_var, lvar);
else
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
{
tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (&llist[1], tseq);
}
break;
}
/* If this is a reference to constant size reduction var
with placeholder, we haven't emitted the initializer
for it because it is undesirable if SIMD arrays are used.
But if they aren't used, we need to emit the deferred
initialization now. */
else if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (new_var),
build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
goto do_dtor;
}
else
{
x = omp_reduction_init (c, TREE_TYPE (new_var));
gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
tree new_vard = new_var;
if (is_simd && omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
tree ref = build_outer_var_ref (var, ctx);
gimplify_assign (unshare_expr (ivar), x, &llist[0]);
if (sctx.is_simt)
{
if (!simt_lane)
simt_lane = create_tmp_var (unsigned_type_node);
x = build_call_expr_internal_loc
(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_BFLY,
TREE_TYPE (ivar), 2, ivar, simt_lane);
x = build2 (code, TREE_TYPE (ivar), ivar, x);
gimplify_assign (ivar, x, &llist[2]);
}
x = build2 (code, TREE_TYPE (ref), ref, ivar);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, &llist[1]);
if (new_vard != new_var)
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
}
else
{
if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
gimplify_assign (new_var, x, ilist);
if (is_simd)
{
tree ref = build_outer_var_ref (var, ctx);
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, dlist);
}
}
}
break;
default:
gcc_unreachable ();
}
}
}
if (sctx.max_vf == 1)
sctx.is_simt = false;
if (sctx.lane || sctx.is_simt)
{
uid = create_tmp_var (ptr_type_node, "simduid");
/* Don't want uninit warnings on simduid, it is always uninitialized,
but we use it not for the value, but for the DECL_UID only. */
TREE_NO_WARNING (uid) = 1;
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
OMP_CLAUSE__SIMDUID__DECL (c) = uid;
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
gimple_omp_for_set_clauses (ctx->stmt, c);
}
/* Emit calls denoting privatized variables and initializing a pointer to
structure that holds private variables as fields after ompdevlow pass. */
if (sctx.is_simt)
{
sctx.simt_eargs[0] = uid;
gimple *g
= gimple_build_call_internal_vec (IFN_GOMP_SIMT_ENTER, sctx.simt_eargs);
gimple_call_set_lhs (g, uid);
gimple_seq_add_stmt (ilist, g);
sctx.simt_eargs.release ();
simtrec = create_tmp_var (ptr_type_node, ".omp_simt");
g = gimple_build_call_internal (IFN_GOMP_SIMT_ENTER_ALLOC, 1, uid);
gimple_call_set_lhs (g, simtrec);
gimple_seq_add_stmt (ilist, g);
}
if (sctx.lane)
{
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
gimple_call_set_lhs (g, sctx.lane);
gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
g = gimple_build_assign (sctx.lane, INTEGER_CST,
build_int_cst (unsigned_type_node, 0));
gimple_seq_add_stmt (ilist, g);
/* Emit reductions across SIMT lanes in log_2(simt_vf) steps. */
if (llist[2])
{
tree simt_vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_VF, 0);
gimple_call_set_lhs (g, simt_vf);
gimple_seq_add_stmt (dlist, g);
tree t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (simt_lane, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_goto (header));
gimple_seq_add_stmt (dlist, gimple_build_label (body));
gimple_seq_add_seq (dlist, llist[2]);
g = gimple_build_assign (simt_lane, LSHIFT_EXPR, simt_lane, integer_one_node);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, simt_lane, simt_vf, body, end);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end));
}
for (int i = 0; i < 2; i++)
if (llist[i])
{
tree vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
gimple_call_set_lhs (g, vf);
gimple_seq *seq = i == 0 ? ilist : dlist;
gimple_seq_add_stmt (seq, g);
tree t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (seq, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (seq, gimple_build_goto (header));
gimple_seq_add_stmt (seq, gimple_build_label (body));
gimple_seq_add_seq (seq, llist[i]);
t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (sctx.idx, PLUS_EXPR, sctx.idx, t);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, sctx.idx, vf, body, end);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (end));
}
}
if (sctx.is_simt)
{
gimple_seq_add_seq (dlist, sctx.simt_dlist);
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMT_EXIT, 1, simtrec);
gimple_seq_add_stmt (dlist, g);
}
/* The copyin sequence is not to be executed by the main thread, since
that would result in self-copies. Perhaps not visible to scalars,
but it certainly is to C++ operator=. */
if (copyin_seq)
{
x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
0);
x = build2 (NE_EXPR, boolean_type_node, x,
build_int_cst (TREE_TYPE (x), 0));
x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
gimplify_and_add (x, ilist);
}
/* If any copyin variable is passed by reference, we must ensure the
master thread doesn't modify it before it is copied over in all
threads. Similarly for variables in both firstprivate and
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. And similarly
for UDRs if initializer expression refers to omp_orig. */
if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
{
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
gimple_seq_add_stmt (ilist, omp_build_barrier (NULL_TREE));
}
/* If max_vf is non-zero, then we can use only a vectorization factor
up to the max_vf we chose. So stick it into the safelen clause. */
if (sctx.max_vf)
{
tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_SAFELEN);
if (c == NULL_TREE
|| (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
&& compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
sctx.max_vf) == 1))
{
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
sctx.max_vf);
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
gimple_omp_for_set_clauses (ctx->stmt, c);
}
}
}
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.

   CLAUSES is the clause chain to scan for lastprivate (and linear-with-
   copyout) clauses; STMT_LIST receives the generated GIMPLE.  When
   PREDICATE is non-NULL, the emitted copy-out assignments are wrapped in
   a conditional so they execute only when the predicate holds (i.e. on
   the thread/iteration that logically performed the last iteration).  */
static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL, orig_clauses = clauses;
  bool par_clauses = false;
  /* simduid/lastlane: used on the SIMD path to pick the final lane out of
     an "omp simd array" privatized copy.  simtcond/simtlast: used on the
     SIMT path to vote on the predicate and broadcast from the last lane.  */
  tree simduid = NULL, lastlane = NULL, simtcond = NULL, simtlast = NULL;
  /* Early exit if there are no lastprivate or linear clauses.  */
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
	|| (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
	    && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
      break;
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;
      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;
      clauses = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      /* Remember that we are now iterating the enclosing parallel's
	 clauses, so the chain-continuation logic below doesn't recurse.  */
      par_clauses = true;
    }
  /* For a SIMD loop, note whether it may be lowered for SIMT execution
     (_simt_ clause present) and grab the _simduid_ marker decl, which the
     SIMD-array last-lane lookup below keys off.  */
  bool maybe_simt = false;
  if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
      && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      maybe_simt = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMT_);
      simduid = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
      if (simduid)
	simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
    }
  if (predicate)
    {
      /* Emit "if (predicate) goto label_true; else goto label;" so the
	 copy-out assignments below run only under the predicate.  The
	 matching LABEL is emitted at the very end of this function.  */
      gcond *stmt;
      tree label_true, arm1, arm2;
      enum tree_code pred_code = TREE_CODE (predicate);
      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      if (TREE_CODE_CLASS (pred_code) == tcc_comparison)
	{
	  /* Gimplify both comparison operands into is_gimple_val form so
	     they can feed a GIMPLE_COND directly.  */
	  arm1 = TREE_OPERAND (predicate, 0);
	  arm2 = TREE_OPERAND (predicate, 1);
	  gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
	  gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
	}
      else
	{
	  /* Non-comparison predicate: test it against false.  */
	  arm1 = predicate;
	  gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
	  arm2 = boolean_false_node;
	  pred_code = NE_EXPR;
	}
      if (maybe_simt)
	{
	  /* Under SIMT, all lanes must take the same branch, so vote
	     across lanes: enter the guarded region if ANY lane's
	     predicate is true.  SIMTCOND is reused later by
	     IFN_GOMP_SIMT_LAST_LANE to identify the last lane.  */
	  c = build2 (pred_code, boolean_type_node, arm1, arm2);
	  c = fold_convert (integer_type_node, c);
	  simtcond = create_tmp_var (integer_type_node);
	  gimplify_assign (simtcond, c, stmt_list);
	  gcall *g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY,
						 1, simtcond);
	  c = create_tmp_var (integer_type_node);
	  gimple_call_set_lhs (g, c);
	  gimple_seq_add_stmt (stmt_list, g);
	  stmt = gimple_build_cond (NE_EXPR, c, integer_zero_node,
				    label_true, label);
	}
      else
	stmt = gimple_build_cond (pred_code, arm1, arm2, label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }
  /* Walk the clause chain; C is advanced at the bottom of the loop so the
     chain can be switched over to the combined parallel's clauses.  */
  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	  || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
	{
	  var = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	      && is_taskloop_ctx (ctx))
	    {
	      /* Taskloop firstprivate+lastprivate: the privatized copy
		 lives in the enclosing task context, not in CTX.  */
	      gcc_checking_assert (ctx->outer && is_task_ctx (ctx->outer));
	      new_var = lookup_decl (var, ctx->outer);
	    }
	  else
	    {
	      new_var = lookup_decl (var, ctx);
	      /* Avoid uninitialized warnings for lastprivate and
		 for linear iterators.  */
	      if (predicate
		  && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		      || OMP_CLAUSE_LINEAR_NO_COPYIN (c)))
		TREE_NO_WARNING (new_var) = 1;
	    }
	  if (!maybe_simt && simduid && DECL_HAS_VALUE_EXPR_P (new_var))
	    {
	      /* SIMD path: the privatized var was expanded into an
		 "omp simd array" indexed by the lane.  Replace the index
		 with the result of IFN_GOMP_SIMD_LAST_LANE so the copy-out
		 reads the element written by the last iteration.  */
	      tree val = DECL_VALUE_EXPR (new_var);
	      if (TREE_CODE (val) == ARRAY_REF
		  && VAR_P (TREE_OPERAND (val, 0))
		  && lookup_attribute ("omp simd array",
				       DECL_ATTRIBUTES (TREE_OPERAND (val,
								      0))))
		{
		  if (lastlane == NULL)
		    {
		      /* LASTLANE is computed once and shared by all
			 clauses in this call.  */
		      lastlane = create_tmp_var (unsigned_type_node);
		      gcall *g
			= gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
						      2, simduid,
						      TREE_OPERAND (val, 1));
		      gimple_call_set_lhs (g, lastlane);
		      gimple_seq_add_stmt (stmt_list, g);
		    }
		  new_var = build4 (ARRAY_REF, TREE_TYPE (val),
				    TREE_OPERAND (val, 0), lastlane,
				    NULL_TREE, NULL_TREE);
		}
	    }
	  else if (maybe_simt)
	    {
	      /* SIMT path: fetch the value from the last active lane
		 (per SIMTCOND) via a cross-lane exchange, then store it
		 into this lane's copy so the generic copy-out below
		 writes the correct value.  */
	      tree val = (DECL_HAS_VALUE_EXPR_P (new_var)
			  ? DECL_VALUE_EXPR (new_var)
			  : new_var);
	      if (simtlast == NULL)
		{
		  simtlast = create_tmp_var (unsigned_type_node);
		  gcall *g = gimple_build_call_internal
		    (IFN_GOMP_SIMT_LAST_LANE, 1, simtcond);
		  gimple_call_set_lhs (g, simtlast);
		  gimple_seq_add_stmt (stmt_list, g);
		}
	      x = build_call_expr_internal_loc
		(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_IDX,
		 TREE_TYPE (val), 2, val, simtlast);
	      new_var = unshare_expr (new_var);
	      gimplify_assign (new_var, x, stmt_list);
	      new_var = unshare_expr (new_var);
	    }
	  /* Emit any deferred per-clause sequence (e.g. the final value
	     computation recorded by earlier lowering), then drop it so
	     it is not emitted twice.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	      OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
	    }
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
	      OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
	    }
	  /* Compute X, the destination (outer) reference that receives
	     the final value.  For a taskloop IV whose original var is a
	     global, assign to the global directly.  */
	  x = NULL_TREE;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV (c))
	    {
	      gcc_checking_assert (is_taskloop_ctx (ctx));
	      tree ovar = maybe_lookup_decl_in_outer_ctx (var,
							  ctx->outer->outer);
	      if (is_global_var (ovar))
		x = ovar;
	    }
	  if (!x)
	    x = build_outer_var_ref (var, ctx, OMP_CLAUSE_LASTPRIVATE);
	  if (omp_is_reference (var))
	    new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	  /* Let the front end build the (possibly non-trivial, e.g. C++
	     operator=) assignment of the private copy to the original.  */
	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
	  gimplify_and_add (x, stmt_list);
	}
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
	{
	  /* If this was a workshare clause, see if it had been combined
	     with its parallel.  In that case, continue looking for the
	     clauses also on the parallel statement itself.  */
	  if (is_parallel_ctx (ctx))
	    break;
	  ctx = ctx->outer;
	  if (ctx == NULL || !is_parallel_ctx (ctx))
	    break;
	  c = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
			       OMP_CLAUSE_LASTPRIVATE);
	  par_clauses = true;
	}
    }
  /* Close the predicate guard opened above, if any.  */
  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
/* Lower the OpenACC reductions of CLAUSES for compute axis LEVEL
   (which might be a placeholder).  INNER is true if this is an inner
   axis of a multi-axis loop.  FORK and JOIN are (optional) fork and
   join markers.  Generate the before-loop forking sequence in
   FORK_SEQ and the after-loop joining sequence to JOIN_SEQ.  The
   general form of these sequences is

     GOACC_REDUCTION_SETUP
     GOACC_FORK
     GOACC_REDUCTION_INIT
     ...
     GOACC_REDUCTION_FINI
     GOACC_JOIN
     GOACC_REDUCTION_TEARDOWN.  */

static void
lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner,
		       gcall *fork, gcall *join, gimple_seq *fork_seq,
		       gimple_seq *join_seq, omp_context *ctx)
{
  /* Statements accumulated around the fork/join markers; stitched
     together at the end in the order documented above.  */
  gimple_seq before_fork = NULL;
  gimple_seq after_fork = NULL;
  gimple_seq before_join = NULL;
  gimple_seq after_join = NULL;
  /* IFN_GOACC_REDUCTION opcode constants, built lazily on the first
     reduction clause seen.  */
  tree init_code = NULL_TREE, fini_code = NULL_TREE,
    setup_code = NULL_TREE, teardown_code = NULL_TREE;
  /* Running byte offset into the shared reduction buffer.  */
  unsigned offset = 0;

  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	tree orig = OMP_CLAUSE_DECL (c);
	tree var = maybe_lookup_decl (orig, ctx);
	tree ref_to_res = NULL_TREE;
	tree incoming, outgoing, v1, v2, v3;
	bool is_private = false;

	enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
	/* Canonicalize the operator: '-' reductions accumulate like '+',
	   and short-circuit logical ops like their bitwise forms.  */
	if (rcode == MINUS_EXPR)
	  rcode = PLUS_EXPR;
	else if (rcode == TRUTH_ANDIF_EXPR)
	  rcode = BIT_AND_EXPR;
	else if (rcode == TRUTH_ORIF_EXPR)
	  rcode = BIT_IOR_EXPR;
	tree op = build_int_cst (unsigned_type_node, rcode);

	if (!var)
	  var = orig;

	incoming = outgoing = var;

	if (!inner)
	  {
	    /* See if an outer construct also reduces this variable.  */
	    omp_context *outer = ctx;

	    while (omp_context *probe = outer->outer)
	      {
		enum gimple_code type = gimple_code (probe->stmt);
		tree cls;

		/* Only OACC loops and OACC parallel targets can carry
		   an outer reduction for this variable; anything else
		   ends the walk.  */
		switch (type)
		  {
		  case GIMPLE_OMP_FOR:
		    cls = gimple_omp_for_clauses (probe->stmt);
		    break;

		  case GIMPLE_OMP_TARGET:
		    if (gimple_omp_target_kind (probe->stmt)
			!= GF_OMP_TARGET_KIND_OACC_PARALLEL)
		      goto do_lookup;

		    cls = gimple_omp_target_clauses (probe->stmt);
		    break;

		  default:
		    goto do_lookup;
		  }

		outer = probe;
		for (; cls; cls = OMP_CLAUSE_CHAIN (cls))
		  if (OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_REDUCTION
		      && orig == OMP_CLAUSE_DECL (cls))
		    {
		      /* Found an enclosing reduction: chain through its
			 private copy.  */
		      incoming = outgoing = lookup_decl (orig, probe);
		      goto has_outer_reduction;
		    }
		  else if ((OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_FIRSTPRIVATE
			    || OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_PRIVATE)
			   && orig == OMP_CLAUSE_DECL (cls))
		    {
		      /* Privatized in an outer construct: no result to
			 write back to the original object.  */
		      is_private = true;
		      goto do_lookup;
		    }
	      }

	  do_lookup:
	    /* This is the outermost construct with this reduction,
	       see if there's a mapping for it.  */
	    if (gimple_code (outer->stmt) == GIMPLE_OMP_TARGET
		&& maybe_lookup_field (orig, outer) && !is_private)
	      {
		ref_to_res = build_receiver_ref (orig, false, outer);
		if (omp_is_reference (orig))
		  ref_to_res = build_simple_mem_ref (ref_to_res);

		tree type = TREE_TYPE (var);
		if (POINTER_TYPE_P (type))
		  type = TREE_TYPE (type);

		/* Start from the operator's identity element and write
		   the combined result back through VAR.  */
		outgoing = var;
		incoming = omp_reduction_init_op (loc, rcode, type);
	      }
	    else
	      {
		/* Try to look at enclosing contexts for reduction var,
		   use original if no mapping found.  */
		tree t = NULL_TREE;
		omp_context *c = ctx->outer;
		while (c && !t)
		  {
		    t = maybe_lookup_decl (orig, c);
		    c = c->outer;
		  }
		incoming = outgoing = (t ? t : orig);
	      }

	  has_outer_reduction:;
	  }

	if (!ref_to_res)
	  ref_to_res = integer_zero_node;

	if (omp_is_reference (orig))
	  {
	    /* For by-reference reductions, materialize fresh temporaries
	       (V1..V3) for the init/fini/teardown stages, then work on
	       the pointed-to objects.  */
	    tree type = TREE_TYPE (var);
	    const char *id = IDENTIFIER_POINTER (DECL_NAME (var));

	    if (!inner)
	      {
		tree x = create_tmp_var (TREE_TYPE (type), id);
		gimplify_assign (var, build_fold_addr_expr (x), fork_seq);
	      }

	    v1 = create_tmp_var (type, id);
	    v2 = create_tmp_var (type, id);
	    v3 = create_tmp_var (type, id);

	    gimplify_assign (v1, var, fork_seq);
	    gimplify_assign (v2, var, fork_seq);
	    gimplify_assign (v3, var, fork_seq);

	    var = build_simple_mem_ref (var);
	    v1 = build_simple_mem_ref (v1);
	    v2 = build_simple_mem_ref (v2);
	    v3 = build_simple_mem_ref (v3);
	    outgoing = build_simple_mem_ref (outgoing);

	    if (!TREE_CONSTANT (incoming))
	      incoming = build_simple_mem_ref (incoming);
	  }
	else
	  v1 = v2 = v3 = var;

	/* Determine position in reduction buffer, which may be used
	   by target.  The offset is aligned to the mode's natural
	   alignment before each slot is assigned.  */
	enum machine_mode mode = TYPE_MODE (TREE_TYPE (var));
	unsigned align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
	offset = (offset + align - 1) & ~(align - 1);
	tree off = build_int_cst (sizetype, offset);
	offset += GET_MODE_SIZE (mode);

	if (!init_code)
	  {
	    init_code = build_int_cst (integer_type_node,
				       IFN_GOACC_REDUCTION_INIT);
	    fini_code = build_int_cst (integer_type_node,
				       IFN_GOACC_REDUCTION_FINI);
	    setup_code = build_int_cst (integer_type_node,
					IFN_GOACC_REDUCTION_SETUP);
	    teardown_code = build_int_cst (integer_type_node,
					   IFN_GOACC_REDUCTION_TEARDOWN);
	  }

	/* Each stage is a call to IFN_GOACC_REDUCTION with a distinct
	   opcode; the target later expands them appropriately.  */
	tree setup_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, setup_code,
					  unshare_expr (ref_to_res),
					  incoming, level, op, off);
	tree init_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, init_code,
					  unshare_expr (ref_to_res),
					  v1, level, op, off);
	tree fini_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, fini_code,
					  unshare_expr (ref_to_res),
					  v2, level, op, off);
	tree teardown_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, teardown_code,
					  ref_to_res, v3, level, op, off);

	gimplify_assign (v1, setup_call, &before_fork);
	gimplify_assign (v2, init_call, &after_fork);
	gimplify_assign (v3, fini_call, &before_join);
	gimplify_assign (outgoing, teardown_call, &after_join);
      }

  /* Now stitch things together.  */
  gimple_seq_add_seq (fork_seq, before_fork);
  if (fork)
    gimple_seq_add_stmt (fork_seq, fork);
  gimple_seq_add_seq (fork_seq, after_fork);

  gimple_seq_add_seq (join_seq, before_join);
  if (join)
    gimple_seq_add_stmt (join_seq, join);
  gimple_seq_add_seq (join_seq, after_join);
}
/* Generate code to implement the REDUCTION clauses.  Appends the
   combining code to STMT_SEQP: either a single OMP_ATOMIC update, or
   a GOMP_atomic_start/GOMP_atomic_end protected region containing one
   combine per clause.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple *stmt;
  tree x, c;
  int count = 0;

  /* OpenACC loop reductions are handled elsewhere.  */
  if (is_gimple_omp_oacc (ctx->stmt))
    return;

  /* SIMD reductions are handled in lower_rec_input_clauses.  */
  if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
      && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    return;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  COUNT becomes -1 when
     the lock path must be used regardless.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
	    || TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
	  {
	    /* Never use OMP_ATOMIC for array reductions or UDRs.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var, orig_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      enum omp_clause_code ccode = OMP_CLAUSE_REDUCTION;
      orig_var = var = OMP_CLAUSE_DECL (c);
      if (TREE_CODE (var) == MEM_REF)
	{
	  /* Array-section reduction: peel the MEM_REF down to the
	     underlying base decl.  */
	  var = TREE_OPERAND (var, 0);
	  if (TREE_CODE (var) == POINTER_PLUS_EXPR)
	    var = TREE_OPERAND (var, 0);
	  if (TREE_CODE (var) == ADDR_EXPR)
	    var = TREE_OPERAND (var, 0);
	  else
	    {
	      /* If this is a pointer or referenced based array
		 section, the var could be private in the outer
		 context e.g. on orphaned loop construct.  Pretend this
		 is private variable's outer reference.  */
	      ccode = OMP_CLAUSE_PRIVATE;
	      if (TREE_CODE (var) == INDIRECT_REF)
		var = TREE_OPERAND (var, 0);
	    }
	  orig_var = var;
	  if (is_variable_sized (var))
	    {
	      /* VLA bases live behind their DECL_VALUE_EXPR.  */
	      gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
	      var = DECL_VALUE_EXPR (var);
	      gcc_assert (TREE_CODE (var) == INDIRECT_REF);
	      var = TREE_OPERAND (var, 0);
	      gcc_assert (DECL_P (var));
	    }
	}
      new_var = lookup_decl (var, ctx);
      if (var == OMP_CLAUSE_DECL (c) && omp_is_reference (var))
	new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx, ccode);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  /* Single scalar reduction: emit one atomic update and we are
	     done with the whole clause list.  */
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}
      else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
	{
	  /* Array-section reduction: build an element-by-element
	     combining loop over the section.  */
	  tree d = OMP_CLAUSE_DECL (c);
	  tree type = TREE_TYPE (d);
	  tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
	  tree i = create_tmp_var (TREE_TYPE (v), NULL);
	  tree ptype = build_pointer_type (TREE_TYPE (type));
	  tree bias = TREE_OPERAND (d, 1);
	  d = TREE_OPERAND (d, 0);
	  if (TREE_CODE (d) == POINTER_PLUS_EXPR)
	    {
	      /* Fold a runtime section offset into BIAS.  */
	      tree b = TREE_OPERAND (d, 1);
	      b = maybe_lookup_decl (b, ctx);
	      if (b == NULL)
		{
		  b = TREE_OPERAND (d, 1);
		  b = maybe_lookup_decl_in_outer_ctx (b, ctx);
		}
	      if (integer_zerop (bias))
		bias = b;
	      else
		{
		  bias = fold_convert_loc (clause_loc, TREE_TYPE (b), bias);
		  bias = fold_build2_loc (clause_loc, PLUS_EXPR,
					  TREE_TYPE (b), b, bias);
		}
	      d = TREE_OPERAND (d, 0);
	    }
	  /* For ref build_outer_var_ref already performs this, so
	     only new_var needs a dereference.  */
	  if (TREE_CODE (d) == INDIRECT_REF)
	    {
	      new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	      gcc_assert (omp_is_reference (var) && var == orig_var);
	    }
	  else if (TREE_CODE (d) == ADDR_EXPR)
	    {
	      if (orig_var == var)
		{
		  new_var = build_fold_addr_expr (new_var);
		  ref = build_fold_addr_expr (ref);
		}
	    }
	  else
	    {
	      gcc_assert (orig_var == var);
	      if (omp_is_reference (var))
		ref = build_fold_addr_expr (ref);
	    }
	  if (DECL_P (v))
	    {
	      /* The section length may itself be a remapped decl.  */
	      tree t = maybe_lookup_decl (v, ctx);
	      if (t)
		v = t;
	      else
		v = maybe_lookup_decl_in_outer_ctx (v, ctx);
	      gimplify_expr (&v, stmt_seqp, NULL, is_gimple_val, fb_rvalue);
	    }
	  if (!integer_zerop (bias))
	    {
	      bias = fold_convert_loc (clause_loc, sizetype, bias);
	      new_var = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
					 TREE_TYPE (new_var), new_var,
					 unshare_expr (bias));
	      ref = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
				     TREE_TYPE (ref), ref, bias);
	    }
	  new_var = fold_convert_loc (clause_loc, ptype, new_var);
	  ref = fold_convert_loc (clause_loc, ptype, ref);
	  /* NEW_VAR walks the private copy, REF the outer array;
	     I counts elements up to V.  */
	  tree m = create_tmp_var (ptype, NULL);
	  gimplify_assign (m, new_var, stmt_seqp);
	  new_var = m;
	  m = create_tmp_var (ptype, NULL);
	  gimplify_assign (m, ref, stmt_seqp);
	  ref = m;
	  gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), stmt_seqp);
	  tree body = create_artificial_label (UNKNOWN_LOCATION);
	  tree end = create_artificial_label (UNKNOWN_LOCATION);
	  gimple_seq_add_stmt (&sub_seq, gimple_build_label (body));
	  tree priv = build_simple_mem_ref_loc (clause_loc, new_var);
	  tree out = build_simple_mem_ref_loc (clause_loc, ref);
	  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      /* User-defined reduction: splice in the combiner with the
		 placeholders bound to the current elements.  */
	      tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
	      tree decl_placeholder
		= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
	      SET_DECL_VALUE_EXPR (placeholder, out);
	      DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	      SET_DECL_VALUE_EXPR (decl_placeholder, priv);
	      DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
	      lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	      gimple_seq_add_seq (&sub_seq,
				  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	      OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	      OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
	    }
	  else
	    {
	      x = build2 (code, TREE_TYPE (out), out, priv);
	      out = unshare_expr (out);
	      gimplify_assign (out, x, &sub_seq);
	    }
	  /* Advance both pointers and the counter, loop while I <= V.  */
	  gimple *g = gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
					   TYPE_SIZE_UNIT (TREE_TYPE (type)));
	  gimple_seq_add_stmt (&sub_seq, g);
	  g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
				   TYPE_SIZE_UNIT (TREE_TYPE (type)));
	  gimple_seq_add_stmt (&sub_seq, g);
	  g = gimple_build_assign (i, PLUS_EXPR, i,
				   build_int_cst (TREE_TYPE (i), 1));
	  gimple_seq_add_stmt (&sub_seq, g);
	  g = gimple_build_cond (LE_EXPR, i, v, body, end);
	  gimple_seq_add_stmt (&sub_seq, g);
	  gimple_seq_add_stmt (&sub_seq, gimple_build_label (end));
	}
      else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  /* Scalar user-defined reduction.  */
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (omp_is_reference (var)
	      && !useless_type_conversion_p (TREE_TYPE (placeholder),
					     TREE_TYPE (ref)))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  /* Plain scalar reduction under the lock path.  */
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  /* Protect all the combines with a single atomic region.  */
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
/* Generate code to implement the COPYPRIVATE clauses.  For each such
   clause, append to SLIST the sender-side store of the value (or its
   address, for by-reference fields) into the copy-out record, and to
   RLIST the receiver-side assignment reading it back out.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
      tree var = OMP_CLAUSE_DECL (c);
      bool by_ref = use_pointer_for_field (var, NULL);

      /* Sender side: stash the value, or its address when the field
	 is a pointer, into the sender record.  */
      tree sref = build_sender_ref (var, ctx);
      tree new_var = lookup_decl_in_outer_ctx (var, ctx);
      tree x = new_var;
      if (by_ref)
	{
	  x = build_fold_addr_expr_loc (clause_loc, new_var);
	  x = fold_convert_loc (clause_loc, TREE_TYPE (sref), x);
	}
      gimplify_assign (sref, x, slist);

      /* Receiver side: read the field back, undoing the pointer
	 indirection if one was introduced above.  */
      tree rref = build_receiver_ref (var, false, ctx);
      if (by_ref)
	{
	  rref = fold_convert_loc (clause_loc,
				   build_pointer_type (TREE_TYPE (new_var)),
				   rref);
	  rref = build_fold_indirect_ref_loc (clause_loc, rref);
	}
      if (omp_is_reference (var))
	{
	  rref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), rref);
	  rref = build_simple_mem_ref_loc (clause_loc, rref);
	  new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, rref);
      gimplify_and_add (x, rlist);
    }
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  Incoming copies go
   into ILIST, outgoing copy-backs into OLIST.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c, t;
  int ignored_looptemp = 0;
  bool is_taskloop = false;

  /* For taskloop, ignore first two _looptemp_ clauses, those are initialized
     by GOMP_taskloop.  */
  if (is_task_ctx (ctx) && gimple_omp_task_taskloop_p (ctx->stmt))
    {
      ignored_looptemp = 2;
      is_taskloop = true;
    }

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      /* Filter to the clause kinds needing sender-side copies; all
	 others are skipped.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	case OMP_CLAUSE_SHARED:
	  if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	    break;
	  continue;
	case OMP_CLAUSE__LOOPTEMP_:
	  if (ignored_looptemp)
	    {
	      ignored_looptemp--;
	      continue;
	    }
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && TREE_CODE (val) == MEM_REF)
	{
	  /* Array-section reduction: strip down to the base decl.  */
	  val = TREE_OPERAND (val, 0);
	  if (TREE_CODE (val) == POINTER_PLUS_EXPR)
	    val = TREE_OPERAND (val, 0);
	  if (TREE_CODE (val) == INDIRECT_REF
	      || TREE_CODE (val) == ADDR_EXPR)
	    val = TREE_OPERAND (val, 0);
	  if (is_variable_sized (val))
	    continue;
	}

      /* For OMP_CLAUSE_SHARED_FIRSTPRIVATE, look beyond the
	 outer taskloop region.  */
      omp_context *ctx_for_o = ctx;
      if (is_taskloop
	  && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
	  && OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	ctx_for_o = ctx->outer;

      var = lookup_decl_in_outer_ctx (val, ctx_for_o);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;

      t = omp_member_access_dummy_var (var);
      if (t)
	{
	  /* Member accesses are represented via a dummy var; substitute
	     the remapped base if the outer context has one.  */
	  var = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx_for_o);
	  if (o != t)
	    var = unshare_and_remap (var, t, o);
	  else
	    var = unshare_expr (var);
	}

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
	{
	  /* Handle taskloop firstprivate/lastprivate, where the
	     lastprivate on GIMPLE_OMP_TASK is represented as
	     OMP_CLAUSE_SHARED_FIRSTPRIVATE.  */
	  tree f = lookup_sfield ((splay_tree_key) &DECL_UID (val), ctx);
	  x = omp_build_component_ref (ctx->sender_decl, f);
	  if (use_pointer_for_field (val, ctx))
	    var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	  DECL_ABSTRACT_ORIGIN (f) = NULL;
	  continue;
	}

      if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
	   || val == OMP_CLAUSE_DECL (c))
	  && is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      /* Decide direction(s): DO_IN copies parent->child before the
	 region, DO_OUT copies child->parent after it.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c)
	      && !by_ref
	      && is_task_ctx (ctx))
	    TREE_NO_WARNING (var) = 1;
	  do_in = true;
	  break;

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE__LOOPTEMP_:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || omp_is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  if (val == OMP_CLAUSE_DECL (c))
	    do_out = !(by_ref || omp_is_reference (val));
	  else
	    by_ref = TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared; instead we walk the
   fields of the sender record type.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, t, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    {
      /* Each interesting field points back at the original shared
	 decl through DECL_ABSTRACT_ORIGIN.  */
      ovar = DECL_ABSTRACT_ORIGIN (f);
      if (!ovar || TREE_CODE (ovar) == FIELD_DECL)
	continue;
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive.  Find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      t = omp_member_access_dummy_var (var);
      if (t)
	{
	  var = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
	  if (o != t)
	    var = unshare_and_remap (var, t, o);
	  else
	    var = unshare_expr (var);
	}

      if (use_pointer_for_field (ovar, ctx))
	{
	  /* Pass the address; no copy-back needed since the child
	     writes through the pointer.  */
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  /* Pass by value in, and copy the (possibly updated) value
	     back out after the region.  */
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
	         or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during inlining.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}
/* Emit an OpenACC head marker call, encapsulating the partitioning and
   other information that must be processed by the target compiler.
   Return the maximum number of dimensions the associated loop might
   be partitioned over.  */

static unsigned
lower_oacc_head_mark (location_t loc, tree ddvar, tree clauses,
		      gimple_seq *seq, omp_context *ctx)
{
  unsigned levels = 0;
  /* TAG accumulates OLF_* partitioning flags from the clauses.  */
  unsigned tag = 0;
  tree gang_static = NULL_TREE;
  auto_vec<tree, 5> args;

  args.quick_push (build_int_cst
		   (integer_type_node, IFN_UNIQUE_OACC_HEAD_MARK));
  args.quick_push (ddvar);
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_GANG:
	  tag |= OLF_DIM_GANG;
	  gang_static = OMP_CLAUSE_GANG_STATIC_EXPR (c);
	  /* static:* is represented by -1, and we can ignore it, as
	     scheduling is always static.  */
	  if (gang_static && integer_minus_onep (gang_static))
	    gang_static = NULL_TREE;
	  levels++;
	  break;

	case OMP_CLAUSE_WORKER:
	  tag |= OLF_DIM_WORKER;
	  levels++;
	  break;

	case OMP_CLAUSE_VECTOR:
	  tag |= OLF_DIM_VECTOR;
	  levels++;
	  break;

	case OMP_CLAUSE_SEQ:
	  tag |= OLF_SEQ;
	  break;

	case OMP_CLAUSE_AUTO:
	  tag |= OLF_AUTO;
	  break;

	case OMP_CLAUSE_INDEPENDENT:
	  tag |= OLF_INDEPENDENT;
	  break;

	case OMP_CLAUSE_TILE:
	  tag |= OLF_TILE;
	  break;

	default:
	  continue;
	}
    }

  if (gang_static)
    {
      /* A non-constant static chunk must be looked up in the outer
	 context.  */
      if (DECL_P (gang_static))
	gang_static = build_outer_var_ref (gang_static, ctx);
      tag |= OLF_GANG_STATIC;
    }

  /* In a parallel region, loops are implicitly INDEPENDENT.  */
  omp_context *tgt = enclosing_target_ctx (ctx);
  if (!tgt || is_oacc_parallel (tgt))
    tag |= OLF_INDEPENDENT;

  if (tag & OLF_TILE)
    /* Tiling could use all 3 levels.  */
    levels = 3;
  else
    {
      /* A loop lacking SEQ, GANG, WORKER and/or VECTOR could be AUTO.
	 Ensure at least one level, or 2 for possible auto
	 partitioning */
      bool maybe_auto = !(tag & (((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1)
				  << OLF_DIM_BASE) | OLF_SEQ));

      if (levels < 1u + maybe_auto)
	levels = 1u + maybe_auto;
    }

  args.quick_push (build_int_cst (integer_type_node, levels));
  args.quick_push (build_int_cst (integer_type_node, tag));
  if (gang_static)
    args.quick_push (gang_static);

  gcall *call = gimple_build_call_internal_vec (IFN_UNIQUE, args);
  gimple_set_location (call, loc);
  gimple_set_lhs (call, ddvar);
  gimple_seq_add_stmt (seq, call);

  return levels;
}
/* Emit an OpenACC loop head or tail marker to SEQ.  HEAD selects
   between the head and tail marker kinds; TOFOLLOW, when non-NULL, is
   an extra argument passed through to the marker call.  The call's
   result is threaded through DDVAR to keep the markers ordered.  */

static void
lower_oacc_loop_marker (location_t loc, tree ddvar, bool head,
			tree tofollow, gimple_seq *seq)
{
  tree kind = build_int_cst (integer_type_node,
			     head ? IFN_UNIQUE_OACC_HEAD_MARK
			     : IFN_UNIQUE_OACC_TAIL_MARK);
  /* TOFOLLOW is optional; only pass it when present.  */
  int nargs = tofollow != NULL_TREE ? 3 : 2;
  gcall *marker = gimple_build_call_internal (IFN_UNIQUE, nargs,
					      kind, ddvar, tofollow);
  gimple_set_location (marker, loc);
  gimple_set_lhs (marker, ddvar);
  gimple_seq_add_stmt (seq, marker);
}
/* Generate the before and after OpenACC loop sequences.  CLAUSES are
   the loop clauses, from which we extract reductions.  Initialize
   HEAD and TAIL.  One fork/join pair (plus its reductions and
   markers) is emitted per partitioning level, nesting inward.  */

static void
lower_oacc_head_tail (location_t loc, tree clauses,
		      gimple_seq *head, gimple_seq *tail, omp_context *ctx)
{
  bool inner = false;
  /* DDVAR threads a fake data dependency through the marker and
     fork/join calls so they stay ordered.  */
  tree ddvar = create_tmp_var (integer_type_node, ".data_dep");
  gimple_seq_add_stmt (head, gimple_build_assign (ddvar, integer_zero_node));

  unsigned count = lower_oacc_head_mark (loc, ddvar, clauses, head, ctx);
  tree fork_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_FORK);
  tree join_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_JOIN);

  gcc_assert (count);
  for (unsigned done = 1; count; count--, done++)
    {
      gimple_seq fork_seq = NULL;
      gimple_seq join_seq = NULL;

      /* The axis is a placeholder (-1) at this point; the device
	 compiler resolves it.  */
      tree place = build_int_cst (integer_type_node, -1);
      gcall *fork = gimple_build_call_internal (IFN_UNIQUE, 3,
						fork_kind, ddvar, place);
      gimple_set_location (fork, loc);
      gimple_set_lhs (fork, ddvar);

      gcall *join = gimple_build_call_internal (IFN_UNIQUE, 3,
						join_kind, ddvar, place);
      gimple_set_location (join, loc);
      gimple_set_lhs (join, ddvar);

      /* Mark the beginning of this level sequence.  */
      if (inner)
	lower_oacc_loop_marker (loc, ddvar, true,
				build_int_cst (integer_type_node, count),
				&fork_seq);
      lower_oacc_loop_marker (loc, ddvar, false,
			      build_int_cst (integer_type_node, done),
			      &join_seq);

      lower_oacc_reductions (loc, clauses, place, inner,
			     fork, join, &fork_seq, &join_seq, ctx);

      /* Append this level to head.  */
      gimple_seq_add_seq (head, fork_seq);
      /* Prepend it to tail.  */
      gimple_seq_add_seq (&join_seq, *tail);
      *tail = join_seq;

      inner = true;
    }

  /* Mark the end of the sequence.  */
  lower_oacc_loop_marker (loc, ddvar, true, NULL_TREE, head);
  lower_oacc_loop_marker (loc, ddvar, false, NULL_TREE, tail);
}
/* If exceptions are enabled, wrap the statements in BODY in a
   MUST_NOT_THROW catch handler and return it.  This prevents programs
   from violating the structured block semantics with throws.  When
   exceptions are disabled, BODY is returned unchanged.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  if (!flag_exceptions)
    return body;

  /* Prefer the frontend-provided cleanup action; fall back to a trap.  */
  tree action = (lang_hooks.eh_protect_cleanup_actions != NULL
		 ? lang_hooks.eh_protect_cleanup_actions ()
		 : builtin_decl_explicit (BUILT_IN_TRAP));

  gimple *mnt = gimple_build_eh_must_not_throw (action);
  gimple *try_stmt = gimple_build_try (body, gimple_seq_alloc_with_stmt (mnt),
				       GIMPLE_TRY_CATCH);
  return gimple_seq_alloc_with_stmt (try_stmt);
}
/* Routines to lower OMP directives into OMP-GIMPLE. */
/* If ctx is a worksharing context inside of a cancellable parallel
   region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
   and conditional branch to parallel's cancel_label to handle
   cancellation in the implicit barrier.  */

static void
maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
{
  gimple *omp_return = gimple_seq_last_stmt (*body);
  gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
  if (gimple_omp_return_nowait_p (omp_return))
    return;
  omp_context *outer = ctx->outer;
  if (!outer
      || gimple_code (outer->stmt) != GIMPLE_OMP_PARALLEL
      || !outer->cancellable)
    return;

  /* Give the OMP return a boolean LHS and branch to the parallel's
     cancel label when it comes back true.  */
  tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
  tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
  tree lhs = create_tmp_var (c_bool_type);
  gimple_omp_return_set_lhs (omp_return, lhs);
  tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
  gimple *cond = gimple_build_cond (NE_EXPR, lhs,
				    fold_convert (c_bool_type,
						  boolean_false_node),
				    outer->cancel_label, fallthru_label);
  gimple_seq_add_stmt (body, cond);
  gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
}
/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  gomp_sections *stmt;
  gimple *t;
  gbind *new_stmt, *bind;
  gimple_seq ilist, dlist, olist, new_body;

  stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));

  push_gimplify_context ();

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx, NULL);

  new_body = gimple_omp_body (stmt);
  gimple_omp_set_body (stmt, NULL);
  tgsi = gsi_start (new_body);
  /* Lower each GIMPLE_OMP_SECTION in place, hoisting its body into
     the sections sequence followed by an OMP return.  */
  for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple *sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      lower_omp (gimple_omp_body_ptr (sec_start), sctx);
      gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
			    GSI_CONTINUE_LINKING);
      gimple_omp_set_body (sec_start, NULL);

      if (gsi_one_before_end_p (tgsi))
	{
	  /* Lastprivate copy-backs go at the end of the last section.  */
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
	  gimple_omp_section_set_last (sec_start);
	}

      gsi_insert_after (&tgsi, gimple_build_omp_return (false),
			GSI_CONTINUE_LINKING);
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, new_body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, new_stmt, true);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  /* Assemble: input clauses, the sections stmt, the dispatch switch,
     the sections bodies, the continue stmt, reductions, destructors,
     then the final OMP return.  */
  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  if (ctx->cancellable)
    gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  bool nowait = omp_find_clause (gimple_omp_sections_clauses (stmt),
				 OMP_CLAUSE_NOWAIT) != NULL_TREE;
  t = gimple_build_omp_return (nowait);
  gimple_seq_add_stmt (&new_body, t);
  maybe_add_implicit_barrier_cancel (ctx, &new_body);

  gimple_bind_set_body (new_stmt, new_body);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ] -> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   to a synchronization analysis pass.  */

static void
lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree body_label = create_artificial_label (loc);
  tree skip_label = create_artificial_label (loc);

  /* result = GOMP_single_start ();  */
  tree start_fn = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  tree result = create_tmp_var (TREE_TYPE (TREE_TYPE (start_fn)));
  gimple *call = gimple_build_call (start_fn, 0);
  gimple_call_set_lhs (call, result);
  gimple_seq_add_stmt (pre_p, call);

  /* if (result) goto body_label; else goto skip_label;  */
  gimple *branch
    = gimple_build_cond (EQ_EXPR, result,
			 fold_convert_loc (loc, TREE_TYPE (result),
					   boolean_true_node),
			 body_label, skip_label);
  gimple_seq_add_stmt (pre_p, branch);

  gimple_seq_add_stmt (pre_p, gimple_build_label (body_label));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (skip_label));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
		       omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  /* SENDER_DECL is the copy-out record filled by the executing thread;
     RECEIVER_DECL is the pointer the other threads read it through.  */
  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  /* A NULL result means this thread executes the body (branch to L0);
     otherwise it copies the values in (branch to L1).  */
  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  /* Collect the sender-side stores here and the receiver-side loads
     into COPYIN_SEQ, emitted after L1 below.  */
  copyin_seq = NULL;
  /* Was garbled as "©in_seq" (mojibake for "&copyin_seq"), which does
     not compile; restored the address-of operator.  */
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  /* Publish the record: GOMP_single_copy_end (&copyout).  */
  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  Chooses the copyprivate
   expansion when a sender record exists, otherwise the simple
   GOMP_single_start form.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
  gbind *bind;
  gimple_seq bind_body, bind_body_tail = NULL, dlist;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  bind_body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx, NULL);
  lower_omp (gimple_omp_body_ptr (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  /* ctx->record_type is only set when there are copyprivate clauses.  */
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  bool nowait = omp_find_clause (gimple_omp_single_clauses (single_stmt),
				 OMP_CLAUSE_NOWAIT) != NULL_TREE;
  gimple *g = gimple_build_omp_return (nowait);
  gimple_seq_add_stmt (&bind_body_tail, g);
  maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
  if (ctx->record_type)
    {
      /* Clobber the copy-out record once it is no longer live, so it
	 does not artificially extend the sender's stack usage.  */
      gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
      tree clobber = build_constructor (ctx->record_type, NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
						   clobber), GSI_SAME_STMT);
    }
  gimple_seq_add_seq (&bind_body, bind_body_tail);
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive: guard the lowered body
   so that only the thread whose omp_get_thread_num () is zero
   executes it, and finish with a nowait OMP return.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple *stmt = gsi_stmt (*gsi_p);
  location_t loc = gimple_location (stmt);
  tree lab = NULL;

  push_gimplify_context ();

  tree block = make_node (BLOCK);
  gbind *bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  /* if (omp_get_thread_num () != 0) goto lab;  */
  tree get_thread_num = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree cond = build_call_expr_loc (loc, get_thread_num, 0);
  cond = build2 (EQ_EXPR, boolean_type_node, cond, integer_zero_node);
  cond = build3 (COND_EXPR, void_type_node, cond, NULL, build_and_jump (&lab));
  gimple_seq guard_seq = NULL;
  gimplify_and_add (cond, &guard_seq);
  gimple_bind_add_seq (bind, guard_seq);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
}
/* Expand code for an OpenMP taskgroup directive.  The region body is
   bracketed by a call to GOMP_taskgroup_start before it and a
   GIMPLE_OMP_RETURN (with nowait) after it, all inside a new bind that
   replaces the directive.  */

static void
lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple *stmt = gsi_stmt (*gsi_p);
  gcall *start_call;
  gbind *new_bind;
  tree block = make_node (BLOCK);

  new_bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, new_bind, true);
  gimple_bind_add_stmt (new_bind, stmt);

  /* Enter the taskgroup in the runtime before the body runs.  */
  start_call
    = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
			 0);
  gimple_bind_add_stmt (new_bind, start_call);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_bind_add_seq (new_bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (new_bind, gimple_build_omp_return (true));

  gimple_bind_append_vars (new_bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
}
/* Fold the OMP_ORDERED_CLAUSES for the OMP_ORDERED in STMT if possible.
   First merges sink depend clauses from adjacent ordered constructs,
   then folds all sink vectors into a single canonical one (see the big
   comment below for the algorithm).  */

static void
lower_omp_ordered_clauses (gimple_stmt_iterator *gsi_p, gomp_ordered *ord_stmt,
			   omp_context *ctx)
{
  struct omp_for_data fd;

  /* Only meaningful when the ordered is nested directly in a
     GIMPLE_OMP_FOR carrying an ordered() clause.  */
  if (!ctx->outer || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR)
    return;

  unsigned int len = gimple_omp_for_collapse (ctx->outer->stmt);
  struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, len);
  omp_extract_for_data (as_a <gomp_for *> (ctx->outer->stmt), &fd, loops);
  if (!fd.ordered)
    return;

  tree *list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
  tree c = gimple_omp_ordered_clauses (ord_stmt);
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
      && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
    {
      /* Merge depend clauses from multiple adjacent
	 #pragma omp ordered depend(sink:...) constructs
	 into one #pragma omp ordered depend(sink:...), so that
	 we can optimize them together.  */
      gimple_stmt_iterator gsi = *gsi_p;
      gsi_next (&gsi);
      while (!gsi_end_p (gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  /* Debug stmts and nops between the ordered constructs are
	     skipped, not merged.  */
	  if (is_gimple_debug (stmt)
	      || gimple_code (stmt) == GIMPLE_NOP)
	    {
	      gsi_next (&gsi);
	      continue;
	    }
	  if (gimple_code (stmt) != GIMPLE_OMP_ORDERED)
	    break;
	  gomp_ordered *ord_stmt2 = as_a <gomp_ordered *> (stmt);
	  c = gimple_omp_ordered_clauses (ord_stmt2);
	  if (c == NULL_TREE
	      || OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	      || OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
	    break;
	  /* Splice the neighbor's sink clauses onto the end of ours and
	     delete the now-redundant ordered stmt.  */
	  while (*list_p)
	    list_p = &OMP_CLAUSE_CHAIN (*list_p);
	  *list_p = c;
	  gsi_remove (&gsi, true);
	}
    }

  /* Canonicalize sink dependence clauses into one folded clause if
     possible.

     The basic algorithm is to create a sink vector whose first
     element is the GCD of all the first elements, and whose remaining
     elements are the minimum of the subsequent columns.

     We ignore dependence vectors whose first element is zero because
     such dependencies are known to be executed by the same thread.

     We take into account the direction of the loop, so a minimum
     becomes a maximum if the loop is iterating forwards.  We also
     ignore sink clauses where the loop direction is unknown, or where
     the offsets are clearly invalid because they are not a multiple
     of the loop increment.

     For example:

	#pragma omp for ordered(2)
	for (i=0; i < N; ++i)
	  for (j=0; j < M; ++j)
	    {
	      #pragma omp ordered \
		depend(sink:i-8,j-2) \
		depend(sink:i,j-1) \	// Completely ignored because i+0.
		depend(sink:i-4,j-3) \
		depend(sink:i-6,j-4)
	      #pragma omp ordered depend(source)
	    }

     Folded clause is:

	depend(sink:-gcd(8,4,6),-min(2,3,4))
	  -or-
	depend(sink:-2,-2)
  */

  /* FIXME: Computing GCD's where the first element is zero is
     non-trivial in the presence of collapsed loops.  Do this later.  */
  if (fd.collapse > 1)
    return;

  wide_int *folded_deps = XALLOCAVEC (wide_int, 2 * len - 1);
  memset (folded_deps, 0, sizeof (*folded_deps) * (2 * len - 1));
  tree folded_dep = NULL_TREE;
  /* TRUE if the first dimension's offset is negative.  */
  bool neg_offset_p = false;

  list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
  unsigned int i;
  while ((c = *list_p) != NULL)
    {
      bool remove = false;

      gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND);
      if (OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
	goto next_ordered_clause;

      tree vec;
      for (vec = OMP_CLAUSE_DECL (c), i = 0;
	   vec && TREE_CODE (vec) == TREE_LIST;
	   vec = TREE_CHAIN (vec), ++i)
	{
	  gcc_assert (i < len);

	  /* omp_extract_for_data has canonicalized the condition.  */
	  gcc_assert (fd.loops[i].cond_code == LT_EXPR
		      || fd.loops[i].cond_code == GT_EXPR);
	  bool forward = fd.loops[i].cond_code == LT_EXPR;
	  bool maybe_lexically_later = true;

	  /* While the committee makes up its mind, bail if we have any
	     non-constant steps.  */
	  if (TREE_CODE (fd.loops[i].step) != INTEGER_CST)
	    goto lower_omp_ordered_ret;

	  tree itype = TREE_TYPE (TREE_VALUE (vec));
	  if (POINTER_TYPE_P (itype))
	    itype = sizetype;
	  wide_int offset = wide_int::from (TREE_PURPOSE (vec),
					    TYPE_PRECISION (itype),
					    TYPE_SIGN (itype));

	  /* Ignore invalid offsets that are not multiples of the step.  */
	  if (!wi::multiple_of_p
	      (wi::abs (offset), wi::abs ((wide_int) fd.loops[i].step),
	       UNSIGNED))
	    {
	      warning_at (OMP_CLAUSE_LOCATION (c), 0,
			  "ignoring sink clause with offset that is not "
			  "a multiple of the loop step");
	      remove = true;
	      goto next_ordered_clause;
	    }

	  /* Calculate the first dimension.  The first dimension of
	     the folded dependency vector is the GCD of the first
	     elements, while ignoring any first elements whose offset
	     is 0.  */
	  if (i == 0)
	    {
	      /* Ignore dependence vectors whose first dimension is 0.  */
	      if (offset == 0)
		{
		  remove = true;
		  goto next_ordered_clause;
		}
	      else
		{
		  if (!TYPE_UNSIGNED (itype) && (forward ^ wi::neg_p (offset)))
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"first offset must be in opposite direction "
				"of loop iterations");
		      goto lower_omp_ordered_ret;
		    }
		  /* Store offsets as non-negative magnitudes; remember
		     the sign separately in NEG_OFFSET_P.  */
		  if (forward)
		    offset = -offset;
		  neg_offset_p = forward;
		  /* Initialize the first time around.  */
		  if (folded_dep == NULL_TREE)
		    {
		      folded_dep = c;
		      folded_deps[0] = offset;
		    }
		  else
		    folded_deps[0] = wi::gcd (folded_deps[0],
					      offset, UNSIGNED);
		}
	    }
	  /* Calculate minimum for the remaining dimensions.  */
	  else
	    {
	      /* folded_deps[len + i - 1] caches this clause's offset so
		 it can be copied wholesale if it becomes the new
		 lexically-latest vector below.  */
	      folded_deps[len + i - 1] = offset;
	      if (folded_dep == c)
		folded_deps[i] = offset;
	      else if (maybe_lexically_later
		       && !wi::eq_p (folded_deps[i], offset))
		{
		  if (forward ^ wi::gts_p (folded_deps[i], offset))
		    {
		      unsigned int j;
		      folded_dep = c;
		      for (j = 1; j <= i; j++)
			folded_deps[j] = folded_deps[len + j - 1];
		    }
		  else
		    maybe_lexically_later = false;
		}
	    }
	}
      gcc_assert (i == len);

      remove = true;

    next_ordered_clause:
      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  if (folded_dep)
    {
      if (neg_offset_p)
	folded_deps[0] = -folded_deps[0];

      tree itype = TREE_TYPE (TREE_VALUE (OMP_CLAUSE_DECL (folded_dep)));
      if (POINTER_TYPE_P (itype))
	itype = sizetype;

      TREE_PURPOSE (OMP_CLAUSE_DECL (folded_dep))
	= wide_int_to_tree (itype, folded_deps[0]);
      OMP_CLAUSE_CHAIN (folded_dep) = gimple_omp_ordered_clauses (ord_stmt);
      *gimple_omp_ordered_clauses_ptr (ord_stmt) = folded_dep;
    }

 lower_omp_ordered_ret:

  /* Ordered without clauses is #pragma omp threads, while we want
     a nop instead if we remove all clauses.  */
  if (gimple_omp_ordered_clauses (ord_stmt) == NULL_TREE)
    gsi_replace (gsi_p, gimple_build_nop (), true);
}
/* Expand code for an OpenMP ordered directive.  Brackets the body with
   GOMP_ordered_start/GOMP_ordered_end calls (or the internal
   GOMP_SIMD_ORDERED_{START,END} functions for simd ordered), and for
   possibly-SIMT regions serializes the lanes with a per-lane loop.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple *stmt = gsi_stmt (*gsi_p), *g;
  gomp_ordered *ord_stmt = as_a <gomp_ordered *> (stmt);
  gcall *x;
  gbind *bind;
  bool simd = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
			       OMP_CLAUSE_SIMD);
  /* FIXME: this should check presence of OMP_CLAUSE__SIMT_ on the enclosing
     loop.  */
  bool maybe_simt
    = simd && omp_maybe_offloaded_ctx (ctx) && omp_max_simt_vf () > 1;
  bool threads = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
				  OMP_CLAUSE_THREADS);

  if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
		       OMP_CLAUSE_DEPEND))
    {
      /* FIXME: This is needs to be moved to the expansion to verify various
	 conditions only testable on cfg with dominators computed, and also
	 all the depend clauses to be merged still might need to be available
	 for the runtime checks.  */
      if (0)
	lower_omp_ordered_clauses (gsi_p, ord_stmt, ctx);
      return;
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  if (simd)
    {
      x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_START, 1,
				      build_int_cst (NULL_TREE, threads));
      cfun->has_simduid_loops = true;
    }
  else
    x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
			   0);
  gimple_bind_add_stmt (bind, x);

  tree counter = NULL_TREE, test = NULL_TREE, body = NULL_TREE;
  if (maybe_simt)
    {
      /* SIMT prologue: fetch this lane's number and loop BODY..TEST,
	 executing the region only on the lane whose predicate from
	 GOMP_SIMT_ORDERED_PRED is zero this iteration.  */
      counter = create_tmp_var (integer_type_node);
      g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
      gimple_call_set_lhs (g, counter);
      gimple_bind_add_stmt (bind, g);
      body = create_artificial_label (UNKNOWN_LOCATION);
      test = create_artificial_label (UNKNOWN_LOCATION);
      gimple_bind_add_stmt (bind, gimple_build_label (body));

      tree simt_pred = create_tmp_var (integer_type_node);
      g = gimple_build_call_internal (IFN_GOMP_SIMT_ORDERED_PRED, 1, counter);
      gimple_call_set_lhs (g, simt_pred);
      gimple_bind_add_stmt (bind, g);

      tree t = create_artificial_label (UNKNOWN_LOCATION);
      g = gimple_build_cond (EQ_EXPR, simt_pred, integer_zero_node, t, test);
      gimple_bind_add_stmt (bind, g);

      gimple_bind_add_stmt (bind, gimple_build_label (t));
    }
  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  if (maybe_simt)
    {
      /* SIMT epilogue: decrement the lane counter and loop back to BODY
	 while any lane still has a non-negative counter
	 (GOMP_SIMT_VOTE_ANY across the warp).  */
      gimple_bind_add_stmt (bind, gimple_build_label (test));
      g = gimple_build_assign (counter, MINUS_EXPR, counter, integer_one_node);
      gimple_bind_add_stmt (bind, g);

      tree c = build2 (GE_EXPR, boolean_type_node, counter, integer_zero_node);
      tree nonneg = create_tmp_var (integer_type_node);
      gimple_seq tseq = NULL;
      gimplify_assign (nonneg, fold_convert (integer_type_node, c), &tseq);
      gimple_bind_add_seq (bind, tseq);

      g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY, 1, nonneg);
      gimple_call_set_lhs (g, nonneg);
      gimple_bind_add_stmt (bind, g);

      tree end = create_artificial_label (UNKNOWN_LOCATION);
      g = gimple_build_cond (NE_EXPR, nonneg, integer_zero_node, body, end);
      gimple_bind_add_stmt (bind, g);

      gimple_bind_add_stmt (bind, gimple_build_label (end));
    }
  if (simd)
    x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_END, 1,
				    build_int_cst (NULL_TREE, threads));
  else
    x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END),
			   0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

/* Map from critical-section names to their lazily created global mutex
   decls (".gomp_critical_user_<name>"), shared across the compilation.  */
static GTY(()) hash_map<tree, tree> *critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
  gbind *bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;

      if (!critical_name_mutexes)
	critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);

      tree *n = critical_name_mutexes->get (name);
      if (n == NULL)
	{
	  char *new_str;

	  /* Create the common-linkage mutex variable for this name;
	     TREE_PUBLIC + DECL_COMMON let all TUs share one copy.  */
	  decl = create_tmp_var_raw (ptr_type_node);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;

	  varpool_node::finalize_decl (decl);

	  critical_name_mutexes->put (name, decl);
	}
      else
	decl = *n;

      /* If '#pragma omp critical' is inside offloaded region or
	 inside function marked as offloadable, the symbol must be
	 marked as offloadable too.  */
      omp_context *octx;
      if (cgraph_node::get (current_function_decl)->offloadable)
	varpool_node::get_create (decl)->offloadable = 1;
      else
	for (octx = ctx->outer; octx; octx = octx->outer)
	  if (is_gimple_omp_offloaded (octx->stmt))
	    {
	      varpool_node::get_create (decl)->offloadable = 1;
	      break;
	    }

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1,
				  build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
				    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      /* Unnamed critical: all such regions share the runtime's single
	 default lock.  */
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  /* Invert the loop condition: the lastprivate copy-out fires only on
     the iteration that exits the loop.  */
  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (tree_fits_shwi_p (fd->loop.step))
    {
      HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP
      || gimple_omp_for_grid_phony (fd->for_stmt))
    cond = omp_grid_lastprivate_predicate (fd);
  else
    {
      tree n2 = fd->loop.n2;
      if (fd->collapse > 1
	  && TREE_CODE (n2) != INTEGER_CST
	  && gimple_omp_for_combined_into_p (fd->for_stmt))
	{
	  /* For a combined construct with a non-constant bound, the
	     real N2 lives in a _looptemp_ clause of an enclosing
	     taskreg (parallel/task) context; find that context.  */
	  struct omp_context *taskreg_ctx = NULL;
	  if (gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
	    {
	      gomp_for *gfor = as_a <gomp_for *> (ctx->outer->stmt);
	      if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_FOR
		  || gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_DISTRIBUTE)
		{
		  if (gimple_omp_for_combined_into_p (gfor))
		    {
		      gcc_assert (ctx->outer->outer
				  && is_parallel_ctx (ctx->outer->outer));
		      taskreg_ctx = ctx->outer->outer;
		    }
		  else
		    {
		      struct omp_for_data outer_fd;
		      omp_extract_for_data (gfor, &outer_fd, NULL);
		      n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
		    }
		}
	      else if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_TASKLOOP)
		taskreg_ctx = ctx->outer->outer;
	    }
	  else if (is_taskreg_ctx (ctx->outer))
	    taskreg_ctx = ctx->outer;
	  if (taskreg_ctx)
	    {
	      int i;
	      /* Skip the first (collapse + 1) _looptemp_ clauses; the
		 one after those holds the adjusted N2.  */
	      tree taskreg_clauses
		= gimple_omp_taskreg_clauses (taskreg_ctx->stmt);
	      tree innerc = omp_find_clause (taskreg_clauses,
					     OMP_CLAUSE__LOOPTEMP_);
	      gcc_assert (innerc);
	      for (i = 0; i < fd->collapse; i++)
		{
		  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
					    OMP_CLAUSE__LOOPTEMP_);
		  gcc_assert (innerc);
		}
	      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
					OMP_CLAUSE__LOOPTEMP_);
	      if (innerc)
		n2 = fold_convert (TREE_TYPE (n2),
				   lookup_decl (OMP_CLAUSE_DECL (innerc),
						taskreg_ctx));
	    }
	}
      cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
    }

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && tree_fits_shwi_p (fd->loop.n2)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      else
	vinit = unshare_expr (vinit);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
/* Lower code for an OMP loop directive.  Replaces the GIMPLE_OMP_FOR at
   GSI_P with a GIMPLE_BIND containing the lowered clauses, pre-body,
   the loop itself, the continue/return markers and the clause epilogue
   (reductions, lastprivate, destructors).  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd, *fdp = NULL;
  gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
  gbind *new_stmt;
  gimple_seq omp_for_body, body, dlist;
  gimple_seq oacc_head = NULL, oacc_tail = NULL;
  size_t i;

  push_gimplify_context ();

  lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Replace at gsi right away, so that 'stmt' is no member
     of a sequence anymore as we're going to add to a different
     one below.  */
  gsi_replace (gsi_p, new_stmt, true);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      gbind *inner_bind
	= as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
      tree vars = gimple_bind_vars (inner_bind);
      gimple_bind_append_vars (new_stmt, vars);
      /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
	 keep them on the inner_bind and it's block.  */
      gimple_bind_set_vars (inner_bind, NULL_TREE);
      if (gimple_bind_block (inner_bind))
	BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
    }

  if (gimple_omp_for_combined_into_p (stmt))
    {
      omp_extract_for_data (stmt, &fd, NULL);
      fdp = &fd;

      /* We need two temporaries with fd.loop.v type (istart/iend)
	 and then (fd.collapse - 1) temporaries with the same
	 type for count2 ... countN-1 vars if not constant.  */
      size_t count = 2;
      tree type = fd.iter_type;
      if (fd.collapse > 1
	  && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	count += fd.collapse - 1;
      bool taskreg_for
	= (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
	   || gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP);
      tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
      tree simtc = NULL;
      tree clauses = *pc;
      if (taskreg_for)
	outerc
	  = omp_find_clause (gimple_omp_taskreg_clauses (ctx->outer->stmt),
			     OMP_CLAUSE__LOOPTEMP_);
      if (ctx->simt_stmt)
	simtc = omp_find_clause (gimple_omp_for_clauses (ctx->simt_stmt),
				 OMP_CLAUSE__LOOPTEMP_);
      /* Prepend COUNT _looptemp_ clauses; the temps come from the outer
	 taskreg context when this for is part of a combined taskreg
	 construct, otherwise they are created fresh here.  */
      for (i = 0; i < count; i++)
	{
	  tree temp;
	  if (taskreg_for)
	    {
	      gcc_assert (outerc);
	      temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
	      outerc = omp_find_clause (OMP_CLAUSE_CHAIN (outerc),
					OMP_CLAUSE__LOOPTEMP_);
	    }
	  else
	    {
	      /* If there are 2 adjacent SIMD stmts, one with _simt_
		 clause, another without, make sure they have the same
		 decls in _looptemp_ clauses, because the outer stmt
		 they are combined into will look up just one inner_stmt.  */
	      if (ctx->simt_stmt)
		temp = OMP_CLAUSE_DECL (simtc);
	      else
		temp = create_tmp_var (type);
	      insert_decl_map (&ctx->outer->cb, temp, temp);
	    }
	  *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
	  OMP_CLAUSE_DECL (*pc) = temp;
	  pc = &OMP_CLAUSE_CHAIN (*pc);
	  if (ctx->simt_stmt)
	    simtc = omp_find_clause (OMP_CLAUSE_CHAIN (simtc),
				     OMP_CLAUSE__LOOPTEMP_);
	}
      *pc = clauses;
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
			   fdp);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  lower_omp (gimple_omp_body_ptr (stmt), ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
      else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (*rhs_p);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
      else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (*rhs_p);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  omp_extract_for_data (stmt, &fd, NULL);

  if (is_gimple_omp_oacc (ctx->stmt)
      && !ctx_in_oacc_kernels_region (ctx))
    lower_oacc_head_tail (gimple_location (stmt),
			  gimple_omp_for_clauses (stmt),
			  &oacc_head, &oacc_tail, ctx);

  /* Add OpenACC partitioning and reduction markers just before the loop.  */
  if (oacc_head)
    gimple_seq_add_seq (&body, oacc_head);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
    for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	  && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
	{
	  /* Remap linear clause decls (and variable steps) to their
	     copies in this context.  */
	  OMP_CLAUSE_DECL (c) = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
	  if (DECL_P (OMP_CLAUSE_LINEAR_STEP (c)))
	    OMP_CLAUSE_LINEAR_STEP (c)
	      = maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_LINEAR_STEP (c),
						ctx);
	}

  bool phony_loop = (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP
		     && gimple_omp_for_grid_phony (stmt));
  if (!phony_loop)
    gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  if (!phony_loop)
    gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							   fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);

  if (ctx->cancellable)
    gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));

  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  if (!phony_loop)
    {
      /* Region exit marker goes at the end of the loop body.  */
      gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
      maybe_add_implicit_barrier_cancel (ctx, &body);
    }

  /* Add OpenACC joining and reduction markers just after the loop.  */
  if (oacc_tail)
    gimple_seq_add_seq (&body, oacc_tail);

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  maybe_remove_omp_member_access_dummy_vars (new_stmt);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
}
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  WI->info points to an int
   counter: 0 = nothing seen yet, 1 = exactly one work-sharing construct
   seen, -1 = disqualified.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  int *state = (int *) wi->info;
  gimple *cur = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (cur))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      /* The first work-sharing construct makes it a candidate; a
	 second one disqualifies it.  */
      if (*state == 0)
	*state = 1;
      else
	*state = -1;
      break;
    default:
      /* Any other statement disqualifies the combined form.  */
      *state = -1;
      break;
    }
  return NULL;
}
/* State threaded through the task copy-function helpers below
   (task_copyfn_copy_decl, task_copyfn_remap_type, create_task_copyfn).  */
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  /* The context of the task whose copyfn is being generated.  */
  omp_context *ctx;
};
/* copy_body_data::copy_decl callback used while building a task copyfn:
   decls that have a sender-record field (present in ctx->sfield_map)
   are replaced by fresh temporaries; everything else is left alone.  */
static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (!splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return var;

  return create_tmp_var (TREE_TYPE (var));
}
/* Build a fresh RECORD_TYPE equivalent to ORIG_TYPE but with each
   field's type, size and offset remapped through TCCTX->cb, recording
   old-field -> new-field mappings in the decl map.  Used when the task
   record contains variably modified types.  */
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree tag, reversed_fields = NULL, rec, field;

  rec = lang_hooks.types.make_type (RECORD_TYPE);
  tag = DECL_NAME (TYPE_NAME (orig_type));
  tag = build_decl (gimple_location (tcctx->ctx->stmt),
		    TYPE_DECL, tag, rec);
  TYPE_NAME (rec) = tag;

  for (field = TYPE_FIELDS (orig_type); field; field = TREE_CHAIN (field))
    {
      tree fld_copy = copy_node (field);
      DECL_CONTEXT (fld_copy) = rec;
      TREE_TYPE (fld_copy) = remap_type (TREE_TYPE (field), &tcctx->cb);
      /* Prepend; the list is put back in declaration order below.  */
      TREE_CHAIN (fld_copy) = reversed_fields;
      walk_tree (&DECL_SIZE (fld_copy), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (fld_copy), copy_tree_body_r, &tcctx->cb,
		 NULL);
      walk_tree (&DECL_FIELD_OFFSET (fld_copy), copy_tree_body_r,
		 &tcctx->cb, NULL);
      reversed_fields = fld_copy;
      tcctx->cb.decl_map->put (field, fld_copy);
    }
  TYPE_FIELDS (rec) = nreverse (reversed_fields);
  layout_type (rec);
  return rec;
}
/* Create task copyfn.  Populates the body of the task's copy function
   (TASK_STMT's copy_fn decl), which copies data from the sender record
   (second argument SARG) into the task's own record (first argument
   ARG): shared pointers, firstprivate copy construction, and VLA
   firstprivate pointer fixups.  */

static void
create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context ();
  push_cfun (child_cfun);

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      /* Set up a copy_body_data for remapping variably modified types
	 into the child function.  */
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = new hash_map<tree, tree>;
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = tcctx.cb.decl_map->get (decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *tcctx.cb.decl_map->get (sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
	splay_tree_key key;
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	/* Shared-firstprivate decls are keyed by &DECL_UID rather than
	   by the decl itself in the field maps.  */
	key = (splay_tree_key) decl;
	if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	  key = (splay_tree_key) &DECL_UID (decl);
	n = splay_tree_lookup (ctx->field_map, key);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, key);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *tcctx.cb.decl_map->get (sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = omp_build_component_ref (src, sf);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	/* VLAs are handled in the last pass below.  */
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *tcctx.cb.decl_map->get (sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL) || omp_is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	/* Use the language's copy constructor semantics for the copy.  */
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *tcctx.cb.decl_map->get (sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *tcctx.cb.decl_map->get (f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *tcctx.cb.decl_map->get (sf);
	  /* Copy-construct the VLA data itself ...  */
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = omp_build_component_ref (dst, f);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  /* ... then point the task record's pointer field at it.  */
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *tcctx.cb.decl_map->get (df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = omp_build_component_ref (ptr, df);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    delete tcctx.cb.decl_map;
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
}
/* Lower the OMP_CLAUSE_DEPEND clauses found in *PCLAUSES into a runtime
   dependence array.  Statements initializing the array are appended to
   *ISEQ; a clobber of the array for after the construct is appended to
   *OSEQ.  The array layout is:
     [0]   total number of dependences (n_in + n_out)
     [1]   number of out/inout dependences
     [2..] the pointer-converted OMP_CLAUSE_DECLs of the out/inout
	   dependences, followed by those of the in dependences.
   A new OMP_CLAUSE_DEPEND clause whose decl is the address of this
   array is pushed onto the front of *PCLAUSES.  */

static void
lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
{
  tree c, clauses;
  gimple *g;
  /* IDX starts at 2 because array slots 0 and 1 hold the counts.  */
  size_t n_in = 0, n_out = 0, idx = 2, i;

  clauses = omp_find_clause (*pclauses, OMP_CLAUSE_DEPEND);
  gcc_assert (clauses);
  /* First pass: count the in and out/inout dependences so the array can
     be sized exactly.  SOURCE/SINK kinds must have been handled (or
     rejected) before this point.  */
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
      switch (OMP_CLAUSE_DEPEND_KIND (c))
	{
	case OMP_CLAUSE_DEPEND_IN:
	  n_in++;
	  break;
	case OMP_CLAUSE_DEPEND_OUT:
	case OMP_CLAUSE_DEPEND_INOUT:
	  n_out++;
	  break;
	case OMP_CLAUSE_DEPEND_SOURCE:
	case OMP_CLAUSE_DEPEND_SINK:
	  /* FALLTHRU */
	default:
	  gcc_unreachable ();
	}
  /* Two extra slots for the counts stored at indices 0 and 1.  */
  tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
  tree array = create_tmp_var (type);
  TREE_ADDRESSABLE (array) = 1;
  /* array[0] = n_in + n_out;  */
  tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
		   NULL_TREE);
  g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
  gimple_seq_add_stmt (iseq, g);
  /* array[1] = n_out;  */
  r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
	      NULL_TREE);
  g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
  gimple_seq_add_stmt (iseq, g);
  /* Second pass: store each dependence decl into the array.  Iteration
     I == 0 emits the out/inout dependences, I == 1 the in ones, matching
     the layout implied by the count in slot 1.  */
  for (i = 0; i < 2; i++)
    {
      if ((i ? n_in : n_out) == 0)
	continue;
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    /* XOR selects non-IN kinds on the first iteration and IN on
	       the second.  */
	    && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
	  {
	    tree t = OMP_CLAUSE_DECL (c);
	    t = fold_convert (ptr_type_node, t);
	    gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
	    r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
			NULL_TREE, NULL_TREE);
	    g = gimple_build_assign (r, t);
	    gimple_seq_add_stmt (iseq, g);
	  }
    }
  /* Prepend a single DEPEND clause holding the address of the array,
     which is what the expansion/runtime will consume.  */
  c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
  OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
  OMP_CLAUSE_CHAIN (c) = *pclauses;
  *pclauses = c;
  /* Clobber the array after the construct so its stack slot can be
     reused.  */
  tree clobber = build_constructor (type, NULL);
  TREE_THIS_VOLATILE (clobber) = 1;
  g = gimple_build_assign (array, clobber);
  gimple_seq_add_stmt (oseq, g);
}
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple *stmt = gsi_stmt (*gsi_p);
  gbind *par_bind, *bind, *dep_bind = NULL;
  gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  /* The directive's body is a single GIMPLE_BIND wrapping the real
     statements.  */
  par_bind
    = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      /* Detect a parallel region that contains exactly one worksharing
	 construct (per check_combined_parallel's count in WS_NUM) and
	 mark it combined so a fused runtime entry can be used later.  */
      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }

  gimple_seq dep_ilist = NULL;
  gimple_seq dep_olist = NULL;
  if (gimple_code (stmt) == GIMPLE_OMP_TASK
      && omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
    {
      /* Task depend clauses become a dependence array; DEP_BIND will
	 wrap everything else so the array's setup (DEP_ILIST) and
	 teardown (DEP_OLIST) bracket the whole construct.  */
      push_gimplify_context ();
      dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
      lower_depend_clauses (gimple_omp_task_clauses_ptr (stmt),
			    &dep_ilist, &dep_olist);
    }

  /* A sender record type means a task copy function is needed to move
     firstprivate data into the task's private record.  */
  if (ctx->srecord_type)
    create_task_copyfn (as_a <gomp_task *> (stmt), ctx);

  push_gimplify_context ();

  par_olist = NULL;
  par_ilist = NULL;
  par_rlist = NULL;
  bool phony_construct = gimple_code (stmt) == GIMPLE_OMP_PARALLEL
    && gimple_omp_parallel_grid_phony (as_a <gomp_parallel *> (stmt));
  if (phony_construct && ctx->record_type)
    {
      gcc_checking_assert (!ctx->receiver_decl);
      ctx->receiver_decl = create_tmp_var
	(build_reference_type (ctx->record_type), ".omp_rec");
    }
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
  /* Recursively lower the region body itself.  */
  lower_omp (&par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_rlist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  maybe_remove_omp_member_access_dummy_vars (par_bind);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      /* The sender decl is the data block handed to the child function;
	 use the sender record type when a task copyfn is involved.  */
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  if (ctx->record_type)
    {
      /* Clobber the sender record after the region so its stack slot
	 can be reused.  */
      tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
							clobber));
    }

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      /* Receive the data block: receiver_decl = (cast) &sender_decl.  */
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  /* Body order: input-clause setup, user body, reductions, optional
     cancellation landing label, then output-clause teardown.  */
  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_rlist);
  if (ctx->cancellable)
    gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  if (gimple_code (stmt) == GIMPLE_OMP_TASK)
    gimple_seq_add_stmt (&new_body,
			 gimple_build_omp_continue (integer_zero_node,
						    integer_zero_node));
  if (!phony_construct)
    {
      gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
      gimple_omp_set_body (stmt, new_body);
    }

  /* Replace the directive with a bind holding the send-side setup, the
     directive (or, for a phony construct, its body inline), and the
     send-side teardown.  */
  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
  gimple_bind_add_seq (bind, ilist);
  if (!phony_construct)
    gimple_bind_add_stmt (bind, stmt);
  else
    gimple_bind_add_seq (bind, new_body);
  gimple_bind_add_seq (bind, olist);

  pop_gimplify_context (NULL);

  if (dep_bind)
    {
      /* Wrap everything in the dependence bind:
	 dep setup, inner bind, dep teardown.  */
      gimple_bind_add_seq (dep_bind, dep_ilist);
      gimple_bind_add_stmt (dep_bind, bind);
      gimple_bind_add_seq (dep_bind, dep_olist);
      pop_gimplify_context (dep_bind);
    }
}
/* Lower the GIMPLE_OMP_TARGET in the current statement
in GSI_P. CTX holds context information for the directive. */
static void
lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree clauses;
tree child_fn, t, c;
gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
gbind *tgt_bind, *bind, *dep_bind = NULL;
gimple_seq tgt_body, olist, ilist, fplist, new_body;
location_t loc = gimple_location (stmt);
bool offloaded, data_region;
unsigned int map_cnt = 0;
offloaded = is_gimple_omp_offloaded (stmt);
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION:
case GF_OMP_TARGET_KIND_UPDATE:
case GF_OMP_TARGET_KIND_ENTER_DATA:
case GF_OMP_TARGET_KIND_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_UPDATE:
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_DECLARE:
data_region = false;
break;
case GF_OMP_TARGET_KIND_DATA:
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
data_region = true;
break;
default:
gcc_unreachable ();
}
clauses = gimple_omp_target_clauses (stmt);
gimple_seq dep_ilist = NULL;
gimple_seq dep_olist = NULL;
if (omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
{
push_gimplify_context ();
dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
lower_depend_clauses (gimple_omp_target_clauses_ptr (stmt),
&dep_ilist, &dep_olist);
}
tgt_bind = NULL;
tgt_body = NULL;
if (offloaded)
{
tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
tgt_body = gimple_bind_body (tgt_bind);
}
else if (data_region)
tgt_body = gimple_omp_body (stmt);
child_fn = ctx->cb.dst_fn;
push_gimplify_context ();
fplist = NULL;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_MAP:
#if CHECKING_P
/* First check what we're prepared to handle in the following. */
switch (OMP_CLAUSE_MAP_KIND (c))
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_POINTER:
case GOMP_MAP_TO_PSET:
case GOMP_MAP_DELETE:
case GOMP_MAP_RELEASE:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_FIRSTPRIVATE_POINTER:
case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
case GOMP_MAP_STRUCT:
case GOMP_MAP_ALWAYS_POINTER:
break;
case GOMP_MAP_FORCE_ALLOC:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
case GOMP_MAP_FORCE_DEVICEPTR:
case GOMP_MAP_DEVICE_RESIDENT:
case GOMP_MAP_LINK:
gcc_assert (is_gimple_omp_oacc (stmt));
break;
default:
gcc_unreachable ();
}
#endif
/* FALLTHRU */
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate:
var = OMP_CLAUSE_DECL (c);
if (!DECL_P (var))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (!OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER)))
map_cnt++;
continue;
}
if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
if (offloaded
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
{
if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))
&& varpool_node::get_create (var)->offloadable)
continue;
tree type = build_pointer_type (TREE_TYPE (var));
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
}
if (!maybe_lookup_field (var, ctx))
continue;
/* Don't remap oacc parallel reduction variables, because the
intermediate result must be local to each gang. */
if (offloaded && !(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_IN_REDUCTION (c)))
{
x = build_receiver_ref (var, true, ctx);
tree new_var = lookup_decl (var, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
x = build_simple_mem_ref (x);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (omp_is_reference (new_var))
{
/* Create a local object to hold the instance
value. */
tree type = TREE_TYPE (TREE_TYPE (new_var));
const char *id = IDENTIFIER_POINTER (DECL_NAME (new_var));
tree inst = create_tmp_var (type, id);
gimplify_assign (inst, fold_indirect_ref (x), &fplist);
x = build_fold_addr_expr (inst);
}
gimplify_assign (new_var, x, &fplist);
}
else if (DECL_P (new_var))
{
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
gcc_unreachable ();
}
map_cnt++;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_oacc_parallel (ctx))
goto oacc_firstprivate;
map_cnt++;
var = OMP_CLAUSE_DECL (c);
if (!omp_is_reference (var)
&& !is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
}
else
x = build_receiver_ref (var, true, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_PRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_IS_DEVICE_PTR:
var = OMP_CLAUSE_DECL (c);
map_cnt++;
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
tree new_var = lookup_decl (var, ctx);
tree type = build_pointer_type (TREE_TYPE (var));
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
{
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (TREE_TYPE (new_var), get_name (new_var));
gimple_add_tmp_var (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
}
if (offloaded)
{
target_nesting_level++;
lower_omp (&tgt_body, ctx);
target_nesting_level--;
}
else if (data_region)
lower_omp (&tgt_body, ctx);
if (offloaded)
{
/* Declare all the variables created by mapping and the variables
declared in the scope of the target body. */
record_vars_into (ctx->block_vars, child_fn);
maybe_remove_omp_member_access_dummy_vars (tgt_bind);
record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
}
olist = NULL;
ilist = NULL;
if (ctx->record_type)
{
ctx->sender_decl
= create_tmp_var (ctx->record_type, ".omp_data_arr");
DECL_NAMELESS (ctx->sender_decl) = 1;
TREE_ADDRESSABLE (ctx->sender_decl) = 1;
t = make_tree_vec (3);
TREE_VEC_ELT (t, 0) = ctx->sender_decl;
TREE_VEC_ELT (t, 1)
= create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
".omp_data_sizes");
DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
tree tkind_type = short_unsigned_type_node;
int talign_shift = 8;
TREE_VEC_ELT (t, 2)
= create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
".omp_data_kinds");
DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
gimple_omp_target_set_data_arg (stmt, t);
vec<constructor_elt, va_gc> *vsize;
vec<constructor_elt, va_gc> *vkind;
vec_alloc (vsize, map_cnt);
vec_alloc (vkind, map_cnt);
unsigned int map_idx = 0;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree ovar, nc, s, purpose, var, x, type;
unsigned int talign;
default:
break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate_map:
nc = c;
ovar = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
break;
if (!DECL_P (ovar))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
{
gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
== get_base_address (ovar));
nc = OMP_CLAUSE_CHAIN (c);
ovar = OMP_CLAUSE_DECL (nc);
}
else
{
tree x = build_sender_ref (ovar, ctx);
tree v
= build_fold_addr_expr_with_type (ovar, ptr_type_node);
gimplify_assign (x, v, &ilist);
nc = NULL_TREE;
}
}
else
{
if (DECL_SIZE (ovar)
&& TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
{
tree ovar2 = DECL_VALUE_EXPR (ovar);
gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
ovar2 = TREE_OPERAND (ovar2, 0);
gcc_assert (DECL_P (ovar2));
ovar = ovar2;
}
if (!maybe_lookup_field (ovar, ctx))
continue;
}
talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
talign = DECL_ALIGN_UNIT (ovar);
if (nc)
{
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
{
gcc_assert (offloaded);
tree avar
= create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
mark_addressable (avar);
gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
talign = DECL_ALIGN_UNIT (avar);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (!omp_is_reference (var))
{
if (is_gimple_reg (var)
&& OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
var = build_fold_addr_expr (var);
}
else
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
gimplify_assign (x, var, &ilist);
}
else if (is_gimple_reg (var))
{
gcc_assert (offloaded);
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
if (GOMP_MAP_COPY_TO_P (map_kind)
|| map_kind == GOMP_MAP_POINTER
|| map_kind == GOMP_MAP_TO_PSET
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
{
/* If we need to initialize a temporary
with VAR because it is not addressable, and
the variable hasn't been initialized yet, then
we'll get a warning for the store to avar.
Don't warn in that case, the mapping might
be implicit. */
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
}
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
if ((GOMP_MAP_COPY_FROM_P (map_kind)
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
&& !TYPE_READONLY (TREE_TYPE (var)))
{
x = unshare_expr (x);
x = build_simple_mem_ref (x);
gimplify_assign (var, x, &olist);
}
}
else
{
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
}
s = NULL_TREE;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
s = TREE_TYPE (ovar);
if (TREE_CODE (s) == REFERENCE_TYPE)
s = TREE_TYPE (s);
s = TYPE_SIZE_UNIT (s);
}
else
s = OMP_CLAUSE_SIZE (c);
if (s == NULL_TREE)
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
unsigned HOST_WIDE_INT tkind, tkind_zero;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_MAP:
tkind = OMP_CLAUSE_MAP_KIND (c);
tkind_zero = tkind;
if (OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c))
switch (tkind)
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_RELEASE:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
tkind_zero = GOMP_MAP_ZERO_LEN_ARRAY_SECTION;
break;
case GOMP_MAP_DELETE:
tkind_zero = GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION;
default:
break;
}
if (tkind_zero != tkind)
{
if (integer_zerop (s))
tkind = tkind_zero;
else if (integer_nonzerop (s))
tkind_zero = tkind;
}
break;
case OMP_CLAUSE_FIRSTPRIVATE:
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_TO:
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_FROM:
tkind = GOMP_MAP_FROM;
tkind_zero = tkind;
break;
default:
gcc_unreachable ();
}
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind_zero
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
tkind_zero |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
gcc_checking_assert (tkind_zero
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
if (tkind == tkind_zero)
x = build_int_cstu (tkind_type, tkind);
else
{
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 0;
x = build3 (COND_EXPR, tkind_type,
fold_build2 (EQ_EXPR, boolean_type_node,
unshare_expr (s), size_zero_node),
build_int_cstu (tkind_type, tkind_zero),
build_int_cstu (tkind_type, tkind));
}
CONSTRUCTOR_APPEND_ELT (vkind, purpose, x);
if (nc && nc != c)
c = nc;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_oacc_parallel (ctx))
goto oacc_firstprivate_map;
ovar = OMP_CLAUSE_DECL (c);
if (omp_is_reference (ovar))
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
talign = DECL_ALIGN_UNIT (ovar);
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
tkind = GOMP_MAP_FIRSTPRIVATE;
type = TREE_TYPE (ovar);
if (omp_is_reference (ovar))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
tree t = var;
if (omp_is_reference (var))
t = build_simple_mem_ref (var);
else if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
if (TREE_CODE (type) != POINTER_TYPE)
t = fold_convert (pointer_sized_int_node, t);
t = fold_convert (TREE_TYPE (x), t);
gimplify_assign (x, t, &ilist);
}
else if (omp_is_reference (var))
gimplify_assign (x, var, &ilist);
else if (is_gimple_reg (var))
{
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else
{
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
if (tkind == GOMP_MAP_FIRSTPRIVATE_INT)
s = size_int (0);
else if (omp_is_reference (ovar))
s = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_IS_DEVICE_PTR:
ovar = OMP_CLAUSE_DECL (c);
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
tkind = GOMP_MAP_USE_DEVICE_PTR;
else
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
type = TREE_TYPE (ovar);
if (TREE_CODE (type) == ARRAY_TYPE)
var = build_fold_addr_expr (var);
else
{
if (omp_is_reference (ovar))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE)
var = build_simple_mem_ref (var);
var = fold_convert (TREE_TYPE (x), var);
}
}
gimplify_assign (x, var, &ilist);
s = size_int (0);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
}
gcc_assert (map_idx == map_cnt);
DECL_INITIAL (TREE_VEC_ELT (t, 1))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
DECL_INITIAL (TREE_VEC_ELT (t, 2))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
for (int i = 1; i <= 2; i++)
if (!TREE_STATIC (TREE_VEC_ELT (t, i)))
{
gimple_seq initlist = NULL;
force_gimple_operand (build1 (DECL_EXPR, void_type_node,
TREE_VEC_ELT (t, i)),
&initlist, true, NULL_TREE);
gimple_seq_add_seq (&ilist, initlist);
tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, i)),
NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gimple_seq_add_stmt (&olist,
gimple_build_assign (TREE_VEC_ELT (t, i),
clobber));
}
tree clobber = build_constructor (ctx->record_type, NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
clobber));
}
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
new_body = NULL;
if (offloaded
&& ctx->record_type)
{
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type. */
t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (ctx->receiver_decl, t));
}
gimple_seq_add_seq (&new_body, fplist);
if (offloaded || data_region)
{
tree prev = NULL_TREE;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var)
|| is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
tree type;
type = TREE_TYPE (var);
if (omp_is_reference (var))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
x = build_receiver_ref (var, false, ctx);
if (TREE_CODE (type) != POINTER_TYPE)
x = fold_convert (pointer_sized_int_node, x);
x = fold_convert (type, x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
if (omp_is_reference (var))
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else
{
x = build_receiver_ref (var, !omp_is_reference (var), ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
}
else if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_var = lookup_decl (pvar, ctx);
x = build_receiver_ref (var, false, ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_PRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
{
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
break;
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_IS_DEVICE_PTR:
var = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
x = build_sender_ref (var, ctx);
else
x = build_receiver_ref (var, false, ctx);
if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_var = lookup_decl (pvar, ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
tree new_var = lookup_decl (var, ctx);
new_var = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_var = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_var));
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else
{
tree type = TREE_TYPE (var);
tree new_var = lookup_decl (var, ctx);
if (omp_is_reference (var))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE)
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
x = fold_convert (type, x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
}
}
new_var = DECL_VALUE_EXPR (new_var);
x = fold_convert (TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
}
/* Handle GOMP_MAP_FIRSTPRIVATE_{POINTER,REFERENCE} in second pass,
so that firstprivate vars holding OMP_CLAUSE_SIZE if needed
are already handled. Similarly OMP_CLAUSE_PRIVATE for VLAs
or references to VLAs. */
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var;
default:
break;
case OMP_CLAUSE_MAP:
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
HOST_WIDE_INT offset = 0;
gcc_assert (prev);
var = OMP_CLAUSE_DECL (c);
if (DECL_P (var)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
&& varpool_node::get_create (var)->offloadable)
break;
if (TREE_CODE (var) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (var, 0)) == COMPONENT_REF)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == COMPONENT_REF)
{
var = get_addr_base_and_unit_offset (var, &offset);
gcc_assert (var != NULL_TREE && DECL_P (var));
}
else if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
tree new_var = lookup_decl (var, ctx), x;
tree type = TREE_TYPE (new_var);
bool is_ref;
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == INDIRECT_REF
&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== COMPONENT_REF))
{
type = TREE_TYPE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0));
is_ref = true;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
{
type = TREE_TYPE (OMP_CLAUSE_DECL (c));
is_ref = TREE_CODE (type) == REFERENCE_TYPE;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else
is_ref = omp_is_reference (var);
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
is_ref = false;
bool ref_to_array = false;
if (is_ref)
{
type = TREE_TYPE (type);
if (TREE_CODE (type) == ARRAY_TYPE)
{
type = build_pointer_type (type);
ref_to_array = true;
}
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
tree decl2 = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (decl2) == MEM_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
new_var = decl2;
type = TREE_TYPE (new_var);
}
x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
x = fold_convert_loc (clause_loc, type, x);
if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
{
tree bias = OMP_CLAUSE_SIZE (c);
if (DECL_P (bias))
bias = lookup_decl (bias, ctx);
bias = fold_convert_loc (clause_loc, sizetype, bias);
bias = fold_build1_loc (clause_loc, NEGATE_EXPR, sizetype,
bias);
x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (x), x, bias);
}
if (ref_to_array)
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
if (is_ref && !ref_to_array)
{
tree t = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (t);
TREE_ADDRESSABLE (t) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (t, x));
x = build_fold_addr_expr_loc (clause_loc, t);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
prev = NULL_TREE;
}
else if (OMP_CLAUSE_CHAIN (c)
&& OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c))
== OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
prev = c;
break;
case OMP_CLAUSE_PRIVATE:
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree al = size_int (DECL_ALIGN (var));
tree x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_pvar), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_pvar, x));
}
else if (omp_is_reference (var) && !is_gimple_omp_oacc (ctx->stmt))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
break;
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree rtype = TREE_TYPE (TREE_TYPE (new_var));
tree al = size_int (TYPE_ALIGN (rtype));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
}
gimple_seq fork_seq = NULL;
gimple_seq join_seq = NULL;
if (is_oacc_parallel (ctx))
{
/* If there are reductions on the offloaded region itself, treat
them as a dummy GANG loop. */
tree level = build_int_cst (integer_type_node, GOMP_DIM_GANG);
lower_oacc_reductions (gimple_location (ctx->stmt), clauses, level,
false, NULL, NULL, &fork_seq, &join_seq, ctx);
}
gimple_seq_add_seq (&new_body, fork_seq);
gimple_seq_add_seq (&new_body, tgt_body);
gimple_seq_add_seq (&new_body, join_seq);
if (offloaded)
new_body = maybe_catch_exception (new_body);
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
gimple_omp_set_body (stmt, new_body);
}
bind = gimple_build_bind (NULL, NULL,
tgt_bind ? gimple_bind_block (tgt_bind)
: NULL_TREE);
gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
gimple_bind_add_seq (bind, ilist);
gimple_bind_add_stmt (bind, stmt);
gimple_bind_add_seq (bind, olist);
pop_gimplify_context (NULL);
if (dep_bind)
{
gimple_bind_add_seq (dep_bind, dep_ilist);
gimple_bind_add_stmt (dep_bind, bind);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
}
/* Expand code for an OpenMP teams directive.  Replaces the statement at
   GSI_P with a GIMPLE_BIND containing the lowered teams region plus a
   call to GOMP_teams carrying the num_teams/thread_limit arguments.  */
static void
lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
  push_gimplify_context ();

  tree block = make_node (BLOCK);
  gbind *bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_seq bind_body = NULL;
  gimple_seq dlist = NULL;
  gimple_seq olist = NULL;

  /* NUM_TEAMS and THREAD_LIMIT default to 0 (meaning "unspecified" to
     the runtime) when the clause is absent; otherwise gimplify the
     clause expression into BIND_BODY.  */
  tree num_teams = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
				    OMP_CLAUSE_NUM_TEAMS);
  if (num_teams == NULL_TREE)
    num_teams = build_int_cst (unsigned_type_node, 0);
  else
    {
      num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
      num_teams = fold_convert (unsigned_type_node, num_teams);
      gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
    }
  tree thread_limit = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
				       OMP_CLAUSE_THREAD_LIMIT);
  if (thread_limit == NULL_TREE)
    thread_limit = build_int_cst (unsigned_type_node, 0);
  else
    {
      thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
      thread_limit = fold_convert (unsigned_type_node, thread_limit);
      gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
		     fb_rvalue);
    }

  lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
			   &bind_body, &dlist, ctx, NULL);
  lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
  lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
  /* A "grid phony" teams statement gets no GOMP_teams call and no
     OMP return; only the real (non-gridified) form does.  */
  if (!gimple_omp_teams_grid_phony (teams_stmt))
    {
      gimple_seq_add_stmt (&bind_body, teams_stmt);
      location_t loc = gimple_location (teams_stmt);
      tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
      gimple *call = gimple_build_call (decl, 2, num_teams, thread_limit);
      gimple_set_location (call, loc);
      gimple_seq_add_stmt (&bind_body, call);
    }

  gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
  gimple_omp_set_body (teams_stmt, NULL);

  gimple_seq_add_seq (&bind_body, olist);
  gimple_seq_add_seq (&bind_body, dlist);

  if (!gimple_omp_teams_grid_phony (teams_stmt))
    gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));

  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code within an artificial GIMPLE_OMP_GRID_BODY OMP construct:
   lower the enclosed statement sequence and terminate it with a
   non-barrier OMP return.  */
static void
lower_omp_grid_body (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple *grid_stmt = gsi_stmt (*gsi_p);
  gimple_seq *body_ptr = gimple_omp_body_ptr (grid_stmt);

  lower_omp (body_ptr, ctx);
  gimple_seq_add_stmt (body_ptr, gimple_build_omp_return (false));
}
/* Callback for lower_omp_1.  Return non-NULL if *TP needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OMP context, but with task_shared_vars set.  */
static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Never descend into types or declarations.  (Irrelevant when we
     return T below, since a non-NULL result stops the walk.)  */
  *walk_subtrees = !IS_TYPE_OR_DECL_P (t);

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (data == NULL && VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  /* Likewise any decl recorded as shared across a task boundary.  */
  if (DECL_P (t)
      && task_shared_vars
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  return NULL_TREE;
}
/* Data to be communicated between lower_omp_regimplify_operands and
   lower_omp_regimplify_operands_p.  */
struct lower_omp_regimplify_operands_data
{
  /* OMP context of the statement being regimplified.  */
  omp_context *ctx;
  /* Flat list of <saved DECL_VALUE_EXPR, decl> pairs pushed by
     lower_omp_regimplify_operands_p so the temporary remapping can be
     undone afterwards.  */
  vec<tree> *decls;
};
/* Helper function for lower_omp_regimplify_operands.  Find
   omp_member_access_dummy_var vars and adjust temporarily their
   DECL_VALUE_EXPRs if needed.  */
static tree
lower_omp_regimplify_operands_p (tree *tp, int *walk_subtrees,
				 void *data)
{
  tree t = omp_member_access_dummy_var (*tp);
  if (t)
    {
      struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
      lower_omp_regimplify_operands_data *ldata
	= (lower_omp_regimplify_operands_data *) wi->info;
      tree o = maybe_lookup_decl (t, ldata->ctx);
      if (o != t)
	{
	  /* Record the old value expr and the decl (in that order —
	     the caller pops them decl-first) so the remap below can be
	     reverted after regimplification.  */
	  ldata->decls->safe_push (DECL_VALUE_EXPR (*tp));
	  ldata->decls->safe_push (*tp);
	  tree v = unshare_and_remap (DECL_VALUE_EXPR (*tp), t, o);
	  SET_DECL_VALUE_EXPR (*tp, v);
	}
    }
  /* Don't walk into types or declarations.  */
  *walk_subtrees = !IS_TYPE_OR_DECL_P (*tp);
  return NULL_TREE;
}
/* Wrapper around gimple_regimplify_operands that adjusts DECL_VALUE_EXPRs
   of omp_member_access_dummy_var vars during regimplification.  */
static void
lower_omp_regimplify_operands (omp_context *ctx, gimple *stmt,
			       gimple_stmt_iterator *gsi_p)
{
  auto_vec<tree, 10> decls;
  if (ctx)
    {
      struct walk_stmt_info wi;
      memset (&wi, '\0', sizeof (wi));
      struct lower_omp_regimplify_operands_data data;
      data.ctx = ctx;
      data.decls = &decls;
      wi.info = &data;
      /* Temporarily remap DECL_VALUE_EXPRs of member-access dummy vars;
	 the saved originals accumulate in DECLS.  */
      walk_gimple_op (stmt, lower_omp_regimplify_operands_p, &wi);
    }
  gimple_regimplify_operands (stmt, gsi_p);
  /* Restore saved DECL_VALUE_EXPRs.  Pairs were pushed value-first,
     so each decl pops before its saved value expression.  */
  while (!decls.is_empty ())
    {
      tree t = decls.pop ();
      tree v = decls.pop ();
      SET_DECL_VALUE_EXPR (t, v);
    }
}
/* Lower the statement at GSI_P for OMP context CTX (NULL when outside
   any OMP construct).  Dispatches on the GIMPLE code: container
   statements recurse via lower_omp, OMP constructs go to their
   dedicated lowering routine, and everything else is regimplified if
   it mentions privatized/shared variables.  */
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple *stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;
  gcall *call_stmt;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* WI is consulted only on the CTX == NULL paths below, which are
     guarded by task_shared_vars — so this is the only initialization
     it needs.  */
  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	/* Regimplify the condition operands if either side mentions a
	   privatized variable.  */
	if ((ctx || task_shared_vars)
	    && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			   lower_omp_regimplify_p,
			   ctx ? NULL : &wi, NULL)
		|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			      lower_omp_regimplify_p,
			      ctx ? NULL : &wi, NULL)))
	  lower_omp_regimplify_operands (ctx, cond_stmt, gsi_p);
      }
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval_ptr (stmt), ctx);
      lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body_ptr (as_a <gtransaction *> (stmt)),
		 ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
      maybe_remove_omp_member_access_dummy_vars (as_a <gbind *> (stmt));
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      /* Cancellable regions get a label that the cancellation check
	 below (GIMPLE_CALL case) branches to.  */
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TASKGROUP:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_taskgroup (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (
			  as_a <gomp_atomic_load *> (stmt)),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	lower_omp_regimplify_operands (ctx, stmt, gsi_p);
      break;
    case GIMPLE_OMP_TARGET:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_target (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TEAMS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_teams (gsi_p, ctx);
      break;
    case GIMPLE_OMP_GRID_BODY:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_grid_body (gsi_p, ctx);
      break;
    case GIMPLE_CALL:
      tree fndecl;
      call_stmt = as_a <gcall *> (stmt);
      fndecl = gimple_call_fndecl (call_stmt);
      if (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	switch (DECL_FUNCTION_CODE (fndecl))
	  {
	  case BUILT_IN_GOMP_BARRIER:
	    if (ctx == NULL)
	      break;
	    /* FALLTHRU */
	  case BUILT_IN_GOMP_CANCEL:
	  case BUILT_IN_GOMP_CANCELLATION_POINT:
	    omp_context *cctx;
	    cctx = ctx;
	    /* Cancellation is attached to the enclosing sections
	       construct, not the individual section.  */
	    if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
	      cctx = cctx->outer;
	    gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
	    if (!cctx->cancellable)
	      {
		/* In a non-cancellable region a cancellation point is a
		   no-op; drop it entirely.  Barriers/cancels are left
		   alone.  */
		if (DECL_FUNCTION_CODE (fndecl)
		    == BUILT_IN_GOMP_CANCELLATION_POINT)
		  {
		    stmt = gimple_build_nop ();
		    gsi_replace (gsi_p, stmt, false);
		  }
		break;
	      }
	    if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	      {
		/* Barriers in cancellable regions must report
		   cancellation, so switch to the _CANCEL variant.  */
		fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
		gimple_call_set_fndecl (call_stmt, fndecl);
		gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
	      }
	    /* Give the call an lhs and branch to the region's cancel
	       label when the runtime reports cancellation.  */
	    tree lhs;
	    lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
	    gimple_call_set_lhs (call_stmt, lhs);
	    tree fallthru_label;
	    fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
	    gimple *g;
	    g = gimple_build_label (fallthru_label);
	    gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
	    g = gimple_build_cond (NE_EXPR, lhs,
				   fold_convert (TREE_TYPE (lhs),
						 boolean_false_node),
				   cctx->cancel_label, fallthru_label);
	    gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
	    break;
	  default:
	    break;
	  }
      /* FALLTHRU */
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	{
	  /* Just remove clobbers, this should happen only if we have
	     "privatized" local addressable variables in SIMD regions,
	     the clobber isn't needed in that case and gimplifying address
	     of the ARRAY_REF into a pointer and creating MEM_REF based
	     clobber would create worse code than we get with the clobber
	     dropped.  */
	  if (gimple_clobber_p (stmt))
	    {
	      gsi_replace (gsi_p, gimple_build_nop (), true);
	      break;
	    }
	  lower_omp_regimplify_operands (ctx, stmt, gsi_p);
	}
      break;
    }
}
/* Lower every statement of *BODY within OMP context CTX via
   lower_omp_1, then fold statements that gimplification deliberately
   left unfolded inside offloading/taskreg regions.  input_location is
   preserved across the walk.  */
static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
  location_t saved_location = input_location;

  for (gimple_stmt_iterator gsi = gsi_start (*body); !gsi_end_p (gsi);
       gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);

  /* During gimplification, we haven't folded statments inside offloading
     or taskreg regions (gimplify.c:maybe_fold_stmt); do that now.  */
  if (target_nesting_level || taskreg_nesting_level)
    for (gimple_stmt_iterator gsi = gsi_start (*body); !gsi_end_p (gsi);
	 gsi_next (&gsi))
      fold_stmt (&gsi);

  input_location = saved_location;
}
/* Main entry point.  */
static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;
  int i;
  omp_context *ctx;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But often, there is nothing to do.  */
  if (flag_cilkplus == 0 && flag_openacc == 0 && flag_openmp == 0
      && flag_openmp_simd == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);

  /* Rewrite target regions into the gridified form first if HSA
     offloading was requested.  */
  if (hsa_gen_requested_p ())
    omp_grid_gridify_all_targets (&body);

  /* Phase 1: build omp_contexts for every construct.  */
  scan_omp (&body, NULL);
  gcc_assert (taskreg_nesting_level == 0);
  FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
    finish_taskreg_scan (ctx);
  taskreg_contexts.release ();

  /* Phase 2: lower, but only if scanning found any construct.  */
  if (all_contexts->root)
    {
      if (task_shared_vars)
	push_gimplify_context ();
      lower_omp (&body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);

  /* If current function is a method, remove artificial dummy VAR_DECL created
     for non-static data member privatization, they aren't needed for
     debuginfo nor anything else, have been already replaced everywhere in the
     IL and cause problems with LTO.  */
  if (DECL_ARGUMENTS (current_function_decl)
      && DECL_ARTIFICIAL (DECL_ARGUMENTS (current_function_decl))
      && (TREE_CODE (TREE_TYPE (DECL_ARGUMENTS (current_function_decl)))
	  == POINTER_TYPE))
    remove_member_access_dummy_vars (DECL_INITIAL (current_function_decl));
  return 0;
}
namespace {

/* Pass descriptor for the "omplower" pass; runs unconditionally so it
   can provide PROP_gimple_lomp even when there is nothing to lower.  */
const pass_data pass_data_lower_omp =
{
  GIMPLE_PASS, /* type */
  "omplower", /* name */
  OPTGROUP_OMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_lomp | PROP_gimple_lomp_dev, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* Thin opt_pass wrapper delegating to execute_lower_omp.  */
class pass_lower_omp : public gimple_opt_pass
{
public:
  pass_lower_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_omp, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return execute_lower_omp (); }

}; // class pass_lower_omp

} // anon namespace
/* Factory for the pass manager: create an instance of the omplower
   pass.  */
gimple_opt_pass *
make_pass_lower_omp (gcc::context *ctxt)
{
  return new pass_lower_omp (ctxt);
}
/* The following is a utility to diagnose structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

/* Map from label decl (key) to the innermost enclosing OMP construct
   (gimple *), populated by diagnose_sb_1 and queried by diagnose_sb_2.  */
static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  BRANCH_CTX/LABEL_CTX are the innermost
   OMP constructs (or NULL) containing the branch and its target label;
   on error the offending statement at GSI_P is replaced by a NOP.  */
static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple *branch_ctx, gimple *label_ctx)
{
  gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
  gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));

  if (label_ctx == branch_ctx)
    return false;

  /* Pick the model name for the diagnostic: Cilk Plus and OpenACC are
     detected from the construct kinds; OpenMP is the fallback.  */
  const char* kind = NULL;

  if (flag_cilkplus)
    {
      if ((branch_ctx
	   && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
	  || (label_ctx
	      && gimple_code (label_ctx) == GIMPLE_OMP_FOR
	      && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
	kind = "Cilk Plus";
    }
  if (flag_openacc)
    {
      if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
	  || (label_ctx && is_gimple_omp_oacc (label_ctx)))
	{
	  gcc_checking_assert (kind == NULL);
	  kind = "OpenACC";
	}
    }
  if (kind == NULL)
    {
      gcc_checking_assert (flag_openmp || flag_openmp_simd);
      kind = "OpenMP";
    }

  /* Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.  */

#if 0
  /* Try to avoid confusing the user by producing and error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from %s structured block", kind);
  else
    error ("invalid entry to %s structured block", kind);
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to %s structured block", kind);
  else
    {
      /* Otherwise, be vague and lazy, but efficient.  */
      error ("invalid branch to/from %s structured block", kind);
    }

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
/* Pass 1: Create a minimal tree of structured blocks, and record
   where each label is found.  WI->info holds the innermost enclosing
   OMP construct (NULL at the outermost level).  */
static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *context = (gimple *) wi->info;
  gimple *inner_context;
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_TASKGROUP:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      /* Remember which construct this label lives in, for pass 2.  */
      splay_tree_insert (all_labels,
			 (splay_tree_key) gimple_label_label (
					    as_a <glabel *> (stmt)),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  Conditional branches, gotos,
   switches, and returns are checked against the label map built by
   diagnose_sb_1.  */
static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *context = (gimple *) wi->info;
  splay_tree_node n;
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_TASKGROUP:
      wi->info = stmt;
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
			   diagnose_sb_2, NULL, wi);
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
	{
	  gcond *cond_stmt = as_a <gcond *> (stmt);
	  tree lab = gimple_cond_true_label (cond_stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple *) n->value : NULL);
	    }
	  lab = gimple_cond_false_label (cond_stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple *) n->value : NULL);
	    }
	}
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	/* Computed gotos (non-LABEL_DECL destinations) can't be
	   checked here.  */
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple *) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	gswitch *switch_stmt = as_a <gswitch *> (stmt);
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    /* One diagnostic per switch is enough.  */
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple *) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      /* A return inside any OMP construct exits it — always invalid.  */
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Run both diagnostic passes over the current function's body:
   pass 1 records each label's enclosing OMP construct, pass 2 checks
   every branch against that map (and may replace invalid statements,
   hence the _mod walk and re-setting of the body).  */
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);

  gimple_set_body (current_function_decl, body);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}
namespace {

/* Pass descriptor for the structured-block diagnostic pass; gated on
   any of the OMP-family front-end flags below.  */
const pass_data pass_data_diagnose_omp_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_omp_blocks", /* name */
  OPTGROUP_OMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* Thin opt_pass wrapper delegating to
   diagnose_omp_structured_block_errors.  */
class pass_diagnose_omp_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_omp_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_cilkplus || flag_openacc || flag_openmp || flag_openmp_simd;
  }
  virtual unsigned int execute (function *)
  {
    return diagnose_omp_structured_block_errors ();
  }

}; // class pass_diagnose_omp_blocks

} // anon namespace
/* Factory for the pass manager: create an instance of the
   diagnose_omp_blocks pass.  */
gimple_opt_pass *
make_pass_diagnose_omp_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_omp_blocks (ctxt);
}
#include "gt-omp-low.h"
|
simd_loop_linear.c | /* Example of the linear clause on the simd construct
The linear clause defines how the j index variable relates to the
loop variable i that is used as an index into some of the arrays.
*/
void simd_loop_linear(double *a, double *b, double *c, int n,
int offset)
{
int i, j = 0;
#pragma omp simd linear(j:1)
for (i=offset; i<n; i+=2)
a[i] = b[j++] + c[i];
}
|
cpu.h | // Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Token-pasting helpers: TEMPLATE(foo, T) expands to foo_<T>
// (e.g. foo_float), used to instantiate this header once per element
// type T (see the instantiation section at the bottom of the file).
#define CONCAT(X, Y) X##_##Y
#define TEMPLATE(X, Y) CONCAT(X, Y)
#if defined(T)
// Template functions start here
// Dense 4-D tensor of element type T.  rv_get_idx_<T> below shows the
// layout is NCHW, row-major with width varying fastest.
typedef struct {
  T* data;        // element buffer of num*channels*height*width entries
                  // (allocation/ownership handled by the caller — TODO confirm)
  long num;       // batch dimension N
  long channels;  // channel dimension C
  long height;    // spatial height H
  long width;     // spatial width W
} TEMPLATE(rv_tensor, T);
//-------------------------------------------------------------------------------
// General functions
//-------------------------------------------------------------------------------
// Build an rv_tensor view over an existing buffer; no allocation or
// copying takes place.
TEMPLATE(rv_tensor, T) TEMPLATE(rv_tensor_create, T)(T* data, long num,
    long channels, long height, long width) {
  TEMPLATE(rv_tensor, T) tensor = {
    .data = data,
    .num = num,
    .channels = channels,
    .height = height,
    .width = width
  };
  return tensor;
}
// Clamp coordinate x into the valid index range [0, size-1].
long TEMPLATE(rv_clamp_coord, T)(long x, long size) {
  if (x <= 0) {
    return 0;
  }
  if (x >= size) {
    return size - 1;
  }
  return x;
}
// Flat NCHW index of element (n, c, h, w): ((n*C + c)*H + h)*W + w.
long TEMPLATE(rv_get_idx, T)(TEMPLATE(rv_tensor, T) t, long n, long c, long h, long w) {
  return ((n * t.channels + c) * t.height + h) * t.width + w;
}
// Read element (n, c, h, w); coordinates must already be in range.
T TEMPLATE(rv_get_val, T)(TEMPLATE(rv_tensor, T) t, long n, long c, long h, long w) {
  return t.data[TEMPLATE(rv_get_idx, T)(t, n, c, h, w)];
}
// Read element (n, c, h, w) with h and w clamped to the tensor bounds
// (replicate-border behaviour); n and c are NOT checked.
T TEMPLATE(rv_get_val_hwbounds, T)(TEMPLATE(rv_tensor, T) t, long n, long c, long h, long w) {
  h = TEMPLATE(rv_clamp_coord, T)(h, t.height);
  w = TEMPLATE(rv_clamp_coord, T)(w, t.width);
  return t.data[TEMPLATE(rv_get_idx, T)(t, n, c, h, w)];
}
// Fill the whole tensor with zeros (parallelized over elements).
void TEMPLATE(rv_zero, T)(TEMPLATE(rv_tensor, T) t) {
  long idx = 0;
  // Use long for the element count: the product of four long dimensions
  // previously truncated to int, which overflows for tensors with more
  // than INT_MAX elements and would make the loop skip (or mis-count)
  // elements.
  long n_elem = t.num * t.channels * t.height * t.width;
  #pragma omp parallel for private(idx)
  for(idx = 0; idx < n_elem; ++idx) {
    t.data[idx] = 0;
  }
}
// Box (nearest-neighbour) resampling kernel: 1 on [-0.5, 0.5), else 0.
T TEMPLATE(rv_box_fwd, T)(T x) {
  return (-0.5 <= x) && (x < 0.5);
}
// Triangle (linear interpolation) kernel: 1-|x| on [-1, 1], else 0.
// Fixed: the second interval test used bitwise '&' while the first used
// logical '&&'.  The value is identical on these 0/1 operands, but the
// mixed operators were inconsistent with the rest of the file and trip
// -Wbitwise-instead-of-logical style checks.
T TEMPLATE(rv_triangle_fwd, T)(T x) {
  return (x+1) * ((-1 <= x) && (x < 0)) + (1-x) * ((0 <= x) && (x <= 1));
}
// Cubic convolution kernel (coefficients match Keys' bicubic with
// a = -0.5): 1.5|x|^3 - 2.5|x|^2 + 1 on |x| <= 1,
// -0.5|x|^3 + 2.5|x|^2 - 4|x| + 2 on 1 < |x| <= 2, and 0 elsewhere.
T TEMPLATE(rv_cubic_fwd, T)(T x) {
  T absx = fabs(x);
  T absx2 = absx*absx;
  T absx3 = absx2*absx;

  return (1.5*absx3 - 2.5*absx2 + 1) * (absx <= 1) +
         (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * ((1 < absx) && (absx <= 2));
}
// Derivative of the box kernel: zero (almost) everywhere.
T TEMPLATE(rv_box_bwd, T)(T x) {
  return 0;
}
// Derivative of the triangle kernel: +1 on [-1, 0), -1 on [0, 1].
// Fixed: bitwise '&' replaced by logical '&&' in the second interval
// test, matching the first term (same value on 0/1 operands, but the
// mixed operators were inconsistent and lint-hostile).
T TEMPLATE(rv_triangle_bwd, T)(T x) {
  return (1) * ((-1 <= x) && (x < 0)) + (-1) * ((0 <= x) && (x <= 1));
}
// Derivative of the cubic kernel above:
// 4.5*x*|x| - 5*x on |x| <= 1, -1.5*x*|x| + 5*x - 4*sgn(x) on
// 1 < |x| <= 2, and 0 elsewhere.  sgn is the sign of x (0 at x == 0).
T TEMPLATE(rv_cubic_bwd, T)(T x) {
  T absx = fabs(x);
  T sgn = (x > 0) - (x < 0);

  return (4.5 * x * absx - 5 * x) * (absx <= 1) +
         (-1.5 * x * absx + 5 * x - 4 * sgn) * ((1 < absx) && (absx <= 2));
}
#include "cpu_resample.h"
#elif !defined(RV_NN_CPU_)
#define RV_NN_CPU_
// Template instantiations start here
#define T float
#include "cpu.h"
#undef T
#define T double
#include "cpu.h"
#undef T
#endif //RV_NN_CPU_
|
GB_binop__minus_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int32)
// A*D function (colscale): GB (_AxD__minus_int32)
// D*A function (rowscale): GB (_DxB__minus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int32)
// C=scalar+B GB (_bind1st__minus_int32)
// C=scalar+B' GB (_bind1st_tran__minus_int32)
// C=A+scalar GB (_bind2nd__minus_int32)
// C=A'+scalar GB (_bind2nd_tran__minus_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
// Element type of A.
#define GB_ATYPE \
    int32_t

// Element type of B.
#define GB_BTYPE \
    int32_t

// Element type of C.
#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// Fixed: the macro body previously ended in a stray line-continuation
// backslash ("0 \"), which spliced the following line into the macro
// definition (line splicing happens before comment removal, C11
// 5.1.1.2) — silently swallowing the "// bij = Bx [pB]" comment.  The
// expansion happened to stay 0, but the continuation was fragile and
// misleading; GB_B_IS_PATTERN had the same defect.
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// The template expands using the GB_* macros defined above; GB() is the
// library's internal name-mangling macro (see GB.h).
void GB (_Cdense_ewise3_accum__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Body supplied by the shared template, specialized via the GB_* macros
// defined above.
void GB (_Cdense_ewise3_noaccum__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this specialization is compiled out (see
// GB_DISABLE above), telling the caller to fall back to the generic code.
GrB_Info GB (_Cdense_accumB__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar (passed as an untyped pointer) into a
// dense matrix C, entrywise, using z = x-y on int32.
GrB_Info GB (_Cdense_accumb__minus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,     // points to an int32_t scalar b
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): this return is unreachable (the block above always returns
// first); harmless boilerplate in this auto-generated file, left as-is.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing into
// C->x (aliased here as Cx).  A_ek_slicing partitions A's entries into
// A_ntasks tasks for A_nthreads OpenMP threads.
GrB_Info GB (_AxD__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing into C->x.
GrB_Info GB (_DxB__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with z = x-y on int32.  When
// is_eWiseUnion is true, alpha/beta scalars substitute for entries present
// in only one of A or B; otherwise the missing operand's entry is implicit.
// The C_to_* maps and TaskList come from the symbolic (phase-1) analysis.
GrB_Info GB (_AaddB__minus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
// the scalars are only read by the template when is_eWiseUnion is true
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper;
// the entire kernel lives in the included meta template.
GrB_Info GB (_AemultB_08__minus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for MINUS (the flipped case z=y-x is
// handled elsewhere as RMINUS), so only the non-flipped branch is compiled.
GrB_Info GB (_AemultB_02__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; M_ek_slicing drives the parallel iteration over M.
GrB_Info GB (_AemultB_04__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held in bitmap form; supports no mask,
// C<M>, and C<!M> variants via ewise_method and the Mask_* flags.
GrB_Info GB (_AemultB_bitmap__minus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = x - Bx [p] for every entry p of B, with the scalar x
// bound to the first operand.  Bb is B's bitmap (GBB treats a NULL bitmap
// as "all entries present"); entries absent from the bitmap are skipped.
GrB_Info GB (_bind1st__minus_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in B's bitmap
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = Ax [p] - y for every entry p of A, with the scalar y
// bound to the second operand.  Ab is A's bitmap; see bind1st above.
GrB_Info GB (_bind2nd__minus_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in A's bitmap
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x - aij (via the GB_CAST_OP
// macro defined just above) using the shared unop transpose template.
GrB_Info GB (_bind1st_tran__minus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent uses (generated boilerplate; here it is
// redefined to the same type, so this is a no-op)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply cij = aij - y (via the GB_CAST_OP
// macro defined just above) using the shared unop transpose template.
GrB_Info GB (_bind2nd_tran__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_fp64
// op(A') function: GB_tran__ainv_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z ; GB_CAST_UNSIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -(uint8_t) Ax [p] for p = 0..anz-1: cast each double entry to
// uint8 (GB_CASTING) and negate it in unsigned arithmetic (GB_OP), per the
// GB_CAST_OP macro above.  Cx and Ax may be aliased since the update at
// each p reads only Ax [p].
GrB_Info GB_unop__ainv_uint8_fp64
(
uint8_t *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(uint8_t) A': transpose A, casting double to uint8 and negating,
// using the shared transpose template (numeric phase only).
GrB_Info GB_tran__ainv_uint8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
NETLMv2_fmt_plug.c | /*
* NETLMv2_fmt.c -- LMv2 Challenge/Response
*
* Written by JoMo-Kun <jmk at foofus.net> in 2008
* and placed in the public domain.
*
* Performance fixes, OMP and utf-8 support by magnum 2010-2011
*
* This algorithm is designed for performing brute-force cracking of the LMv2
* challenge/response sets exchanged during network-based authentication
* attempts [1]. The captured challenge/response set from these attempts
* should be stored using the following format:
*
* USERNAME::DOMAIN:SERVER CHALLENGE:LMv2 RESPONSE:CLIENT CHALLENGE
*
* For example:
* Administrator::WORKGROUP:1122334455667788:6759A5A7EFB25452911DE7DE8296A0D8:F503236B200A5B3A
*
* It should be noted that a LMv2 authentication response is not same as a LM
* password hash, which can be extracted using tools such as FgDump [2]. In
* fact, a NTLM hash and not a LM hash is used within the LMv2 algorithm. LMv2
* challenge/response authentication typically takes place when the GPO
* "Network Security: LAN Manager authentication level" is configured to a setting
* that enforces the use of NTLMv2, such as "Send NTLMv2 response only\refuse
* LM & NTLM."
*
* LMv2 responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theLmv2Response
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETLMv2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETLMv2);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "md5.h"
#include "hmacmd5.h"
#include "byteorder.h"
#include "memdbg.h"
#ifndef uchar
#define uchar unsigned char
#endif
#define FORMAT_LABEL "netlmv2"
#define FORMAT_NAME "LMv2 C/R"
#define ALGORITHM_NAME "MD4 HMAC-MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125 /* lmcons.h - PWLEN (256) ? 127 ? */
#define USERNAME_LENGTH 60 /* lmcons.h - UNLEN (256) / LM20_UNLEN (20) */
#define DOMAIN_LENGTH 45 /* lmcons.h - CNLEN / DNLEN */
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define CHALLENGE_LENGTH 32
#define SALT_SIZE 16 + 1 + 2 * (USERNAME_LENGTH + DOMAIN_LENGTH) + 1
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH 32
#define TOTAL_LENGTH 12 + USERNAME_LENGTH + DOMAIN_LENGTH + CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
// these may be altered in init() if running OMP
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define OMP_SCALE 1536
static struct fmt_tests tests[] = {
{"", "1337adminPASS", {"FOODOM\\Administrator", "", "", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} },
{"$NETLMv2$ADMINISTRATORFOODOM$1122334455667788$6F64C5C1E35F68DD80388C0F00F34406$F0F3FF27037AA69F", "1337adminPASS"},
{"$NETLMv2$USER1$1122334455667788$B1D163EA5881504F3963DC50FCDC26C1$EB4D9E8138149E20", "foobar"},
{"$NETLMv2$ATEST$1122334455667788$83B59F1536D3321DBF1FAEC14ADB1675$A1E7281FE8C10E53", "SomeFancyP4$$w0rdHere"},
{"", "1337adminPASS", {"administrator", "", "FOODOM", "1122334455667788", "6F64C5C1E35F68DD80388C0F00F34406", "F0F3FF27037AA69F"} },
{"", "foobar", {"user1", "", "", "1122334455667788", "B1D163EA5881504F3963DC50FCDC26C1", "EB4D9E8138149E20"} },
{"", "SomeFancyP4$$w0rdHere", {"aTest", "", "", "1122334455667788", "83B59F1536D3321DBF1FAEC14ADB1675", "A1E7281FE8C10E53"} },
{NULL}
};
static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
static uchar (*output)[BINARY_SIZE];
static HMACMD5Context (*saved_ctx);
static int keys_prepared;
static unsigned char *challenge;
#if !defined(uint16) && !defined(HAVE_UINT16_FROM_RPC_RPC_H)
#if (SIZEOF_SHORT == 4)
#define uint16 __ERROR___CANNOT_DETERMINE_TYPE_FOR_INT16;
#else /* SIZEOF_SHORT != 4 */
#define uint16 unsigned short
#endif /* SIZEOF_SHORT != 4 */
#endif
#if !defined(int16) && !defined(HAVE_INT16_FROM_RPC_RPC_H)
#if (SIZEOF_SHORT == 4)
#define int16 __ERROR___CANNOT_DETERMINE_TYPE_FOR_INT16;
#else /* SIZEOF_SHORT != 4 */
#define int16 short
#endif /* SIZEOF_SHORT != 4 */
#endif
// One-time format setup: scale the keys-per-crypt limits for OpenMP, then
// allocate the per-candidate buffers sized to the (possibly scaled)
// max_keys_per_crypt.  Buffers come from mem_calloc_tiny and are never
// freed individually (freed in bulk by the memory pool at shutdown).
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
// give each thread OMP_SCALE candidates per crypt_all() call
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_NONE);
saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
output = mem_calloc_tiny(sizeof(*output) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_ctx = mem_calloc_tiny(sizeof(*saved_ctx) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
// Validate a "$NETLMv2$IDENTITY$SRVCHAL$RESPONSE$CLICHAL" hash string.
// Each section is walked with pos2 until its '$' delimiter; a NUL hit
// inside a section fails the relevant character test (NUL < 0x20, and
// atoi16[NUL] == 0x7F), so the loops cannot run past the terminator.
// Returns 1 if the string is well-formed, 0 otherwise.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *pos, *pos2;
if (ciphertext == NULL) return 0;
else if (strncmp(ciphertext, "$NETLMv2$", 9)!=0) return 0;
pos = &ciphertext[9];
/* Validate Username and Domain Length */
for (pos2 = pos; *pos2 != '$'; pos2++)
if ((unsigned char)*pos2 < 0x20)
return 0;
if ( !(*pos2 && (pos2 - pos <= USERNAME_LENGTH + DOMAIN_LENGTH)) )
return 0;
/* Validate Server Challenge Length (hex, CHALLENGE_LENGTH/2 digits) */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 2)) )
return 0;
/* Validate LMv2 Response Length (hex, CIPHERTEXT_LENGTH digits) */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
return 0;
/* Validate Client Challenge Length; must be the final field */
pos2++; pos = pos2;
for (; atoi16[ARCH_INDEX(*pos2)] != 0x7F; pos2++);
if (pos2 - pos != CHALLENGE_LENGTH / 2)
return 0;
if (pos2[0] != '\0')
return 0;
return 1;
}
// Canonicalize loose pwdump-style fields into "$NETLMv2$..." form.  The
// identity is UPPER(user) + domain; it is built from either "DOMAIN\user"
// in field 0 (domain moved after the upper-cased user) or user in field 0
// plus domain in field 2.  Returns the original split_fields[1] when the
// input is already canonical or cannot be assembled.
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
char *srv_challenge = split_fields[3];
char *nethashv2 = split_fields[4];
char *cli_challenge = split_fields[5];
char *login = split_fields[0];
char *uid = split_fields[2];
char *identity = NULL, *tmp;
if (!strncmp(split_fields[1], "$NETLMv2$", 9))
return split_fields[1];
if (!split_fields[0]||!split_fields[2]||!split_fields[3]||!split_fields[4]||!split_fields[5])
return split_fields[1];
/* DOMAIN\USER: -or- USER::DOMAIN: */
if ((tmp = strstr(login, "\\")) != NULL) {
/* strlen(login) == strlen(user) + strlen(domain) + 1 ('\\'), which is
   exactly user + domain + NUL -- an exact-fit allocation */
identity = (char *) mem_alloc(strlen(login));
strcpy(identity, tmp + 1);
/* Upper-Case Username - Not Domain */
enc_strupper(identity);
strncat(identity, login, tmp - login);
}
else {
identity = (char *) mem_alloc(strlen(login) + strlen(uid) + 1);
strcpy(identity, login);
enc_strupper(identity);
strcat(identity, uid);
}
tmp = (char *) mem_alloc(9 + strlen(identity) + 1 + strlen(srv_challenge) + 1 + strlen(nethashv2) + 1 + strlen(cli_challenge) + 1);
sprintf(tmp, "$NETLMv2$%s$%s$%s$%s", identity, srv_challenge, nethashv2, cli_challenge);
MEM_FREE(identity);
if (valid(tmp, self)) {
char *cp = str_alloc_copy(tmp);
MEM_FREE(tmp);
return cp;
}
MEM_FREE(tmp);
return split_fields[1];
}
// Canonicalize a valid ciphertext: lower-case everything after the
// "$NETLMv2$IDENTITY$" prefix (the hex fields), leaving the identity's case
// intact.  Returns a pointer to a static buffer, as the format API expects.
//
// Fix: the original copied strlen(ciphertext) bytes into the fixed
// out[TOTAL_LENGTH + 1] buffer; an over-long (corrupt or hostile) input
// would overflow it.  The copy is now bounded by TOTAL_LENGTH.
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];
	char *pos = NULL;
	int identity_length = 0;
	size_t len;

	/* Calculate identity length */
	for (pos = ciphertext + 9; *pos != '$'; pos++);
	identity_length = pos - (ciphertext + 9);

	len = strlen(ciphertext);
	if (len > TOTAL_LENGTH)
		len = TOTAL_LENGTH;	/* never overflow the static buffer */

	memset(out, 0, TOTAL_LENGTH + 1);
	memcpy(out, ciphertext, len);
	strlwr(&out[10 + identity_length]); /* Exclude: $NETLMv2$USERDOMAIN$ */
	return out;
}
static void *get_binary(char *ciphertext)
{
static uchar *binary;
char *pos = NULL;
int i, identity_length;
if (!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
for (pos = ciphertext + 9; *pos != '$'; pos++);
identity_length = pos - (ciphertext + 9);
ciphertext += 9 + identity_length + 1 + CHALLENGE_LENGTH / 2 + 1;
for (i=0; i<BINARY_SIZE; i++)
{
binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
}
return binary;
}
/* Calculate the LMv2 response for the given challenge, using the
specified authentication identity (username and domain), password
and client nonce.
*/
/* Calculate the LMv2 response for each candidate key against the current
   salt.  The NTLM hash and the HMAC-MD5 key schedule derived from it are
   cached in saved_ctx[] and recomputed only after set_key() clears
   keys_prepared (i.e. once per new key batch, not per salt). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int i = 0;
#ifdef _OPENMP
#pragma omp parallel for
for(i = 0; i < count; i++)
#endif
{
unsigned char ntlm_v2_hash[16];
HMACMD5Context ctx; // can't be moved above the OMP pragma
if (!keys_prepared) {
int len;
unsigned char ntlm[16];
/* Generate 16-byte NTLM hash */
len = E_md4hash(saved_plain[i], saved_len[i], ntlm);
// We do key setup of the next HMAC_MD5 here (only when the key set changed)
hmac_md5_init_K16(ntlm, &saved_ctx[i]);
/* E_md4hash returns -(truncated length) when the key was cut off;
   truncate our stored plaintext to match so get_key() agrees */
if (len <= 0)
saved_plain[i][-len] = 0; // match truncation
}
/* HMAC-MD5(Username + Domain, NTLM Hash); identity length is at
   challenge[16], identity bytes start at challenge[17] */
memcpy(&ctx, &saved_ctx[i], sizeof(ctx));
hmac_md5_update(&challenge[17], (int)challenge[16], &ctx);
hmac_md5_final(ntlm_v2_hash, &ctx);
/* Generate 16-byte non-client nonce portion of LMv2 Response */
/* HMAC-MD5(Challenge + Nonce, NTLMv2 Hash) + Nonce */
hmac_md5(ntlm_v2_hash, challenge, 16, (unsigned char*)output[i]);
}
keys_prepared = 1;
return count;
}
// Return 1 if any computed response in output[0..count-1] matches the
// stored binary, 0 otherwise.
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (memcmp(output[i], binary, BINARY_SIZE) == 0)
			return 1;
		i++;
	}
	return 0;
}
// Return 1 if the candidate at the given index matches the stored binary.
static int cmp_one(void *binary, int index)
{
	return memcmp(output[index], binary, BINARY_SIZE) == 0;
}
// Final exact check: re-decode the full binary from the source ciphertext
// and compare it against the computed response at the given index.
static int cmp_exact(char *source, int index)
{
	return memcmp(output[index], get_binary(source), BINARY_SIZE) == 0;
}
/* We're essentially using three salts, but we're going to pack it into a single blob for now.
|Client Challenge (8 Bytes)|Server Challenge (8 Bytes)|Unicode(Username (<=20).Domain (<=15))
*/
/* We're essentially using three salts, but we're going to pack it into a single blob for now.
   Layout of the returned SALT_SIZE blob:
     bytes [0..7]   server challenge (binary)
     bytes [8..15]  client challenge (binary)
     byte  [16]     length in bytes of the UTF-16 identity that follows
     bytes [17..]   identity (username+domain) as NT unicode (UTF-16LE)
*/
static void *get_salt(char *ciphertext)
{
static unsigned char *binary_salt;
unsigned char identity[USERNAME_LENGTH + DOMAIN_LENGTH + 1];
UTF16 identity_ucs2[USERNAME_LENGTH + DOMAIN_LENGTH + 1];
int i, identity_length;
int identity_ucs2_length;
char *pos = NULL;
if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
memset(binary_salt, 0, SALT_SIZE);
/* Calculate identity length */
for (pos = ciphertext + 9; *pos != '$'; pos++);
identity_length = pos - (ciphertext + 9);
/* Convert identity (username + domain) string to NT unicode */
strnzcpy((char *)identity, ciphertext + 9, sizeof(identity));
identity_ucs2_length = enc_to_utf16((UTF16 *)identity_ucs2, USERNAME_LENGTH + DOMAIN_LENGTH, (UTF8 *)identity, identity_length) * sizeof(int16);
if (identity_ucs2_length < 0) // Truncated at Unicode conversion.
identity_ucs2_length = strlen16((UTF16 *)identity_ucs2) * sizeof(int16);
binary_salt[16] = (unsigned char)identity_ucs2_length;
memcpy(&binary_salt[17], (char *)identity_ucs2, identity_ucs2_length);
/* Set server challenge (hex -> bytes [0..7]) */
ciphertext += 10 + identity_length;
for (i = 0; i < 8; i++)
binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
/* Set client challenge (hex -> bytes [8..15]) */
ciphertext += 2 + CHALLENGE_LENGTH / 2 + CIPHERTEXT_LENGTH;
for (i = 0; i < 8; ++i)
binary_salt[i + 8] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
/* Return a concatenation of the server and client challenges and the identity value */
return (void*)binary_salt;
}
// Install the packed salt blob (see get_salt for its layout); crypt_all
// reads it through the file-scope 'challenge' pointer.
static void set_salt(void *salt)
{
challenge = salt;
}
// Stash a candidate key (with its NUL terminator) and its length, and mark
// the cached NTLM/HMAC key schedules stale so crypt_all() rebuilds them.
static void set_key(char *key, int index)
{
	const size_t length = strlen(key);

	saved_len[index] = length;
	memcpy((char *)saved_plain[index], key, length + 1);
	keys_prepared = 0;
}
// Return the stored plaintext for this index (possibly truncated by
// crypt_all to match E_md4hash's truncation).
static char *get_key(int index)
{
return (char *)saved_plain[index];
}
static int salt_hash(void *salt)
{
	/* Hash the client challenge (in case server salt was spoofed).
	 * The client challenge occupies bytes 8..15 of the salt blob, so read
	 * the 32-bit word at byte offset 8.  The previous expression
	 * "*(ARCH_WORD_32 *)salt + 8" dereferenced the *server* challenge and
	 * then added 8 to its value, due to operator precedence, defeating
	 * the stated intent. */
	return ((ARCH_WORD_32 *)salt)[2] & (SALT_HASH_SIZE - 1);
}
/* Partial-hash accessors: return the low 4..27 bits of the first word of
   the computed response, used by the cracker's hash tables of increasing
   size. */
static int get_hash_0(int index)
{
return *(ARCH_WORD_32 *)output[index] & 0xF;
}
static int get_hash_1(int index)
{
return *(ARCH_WORD_32 *)output[index] & 0xFF;
}
static int get_hash_2(int index)
{
return *(ARCH_WORD_32 *)output[index] & 0xFFF;
}
static int get_hash_3(int index)
{
return *(ARCH_WORD_32 *)output[index] & 0xFFFF;
}
static int get_hash_4(int index)
{
return *(ARCH_WORD_32 *)output[index] & 0xFFFFF;
}
static int get_hash_5(int index)
{
return *(ARCH_WORD_32 *)output[index] & 0xFFFFFF;
}
static int get_hash_6(int index)
{
return *(ARCH_WORD_32 *)output[index] & 0x7FFFFFF;
}
/* Format descriptor registered with John the Ripper: static parameters
   first, then the method table wired to the functions above. */
struct fmt_main fmt_NETLMv2 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
atomic-4.c | /* PR middle-end/35611 */
/* { dg-options "-O2" } */
extern void abort (void);
/* 1000 iterations each add 1.0L to the shared long double under
   "#pragma omp atomic", so the final value must be exactly 1000.0L
   regardless of thread count; abort() signals a miscompile (PR 35611
   concerned atomic updates on long double). */
int
main (void)
{
long double d = .0L;
int i;
#pragma omp parallel for shared (d)
for (i = 0; i < 1000; i++)
#pragma omp atomic
d += 1.0L;
if (d != 1000.0L)
abort ();
return 0;
}
|
GxB_UnaryOp_ztype_name.c | //------------------------------------------------------------------------------
// GxB_UnaryOp_ztype_name: return the type_name of z for z=f(x)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_UnaryOp_ztype_name // return the name of the type of z
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_UnaryOp unaryop
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_UnaryOp_ztype_name (type_name, op)") ;
GB_RETURN_IF_NULL (type_name) ;
GB_RETURN_IF_NULL_OR_FAULTY (unaryop) ;
ASSERT_UNARYOP_OK (unaryop, "unaryop for ztype_name", GB0) ;
//--------------------------------------------------------------------------
// get the type_name
//--------------------------------------------------------------------------
// copy the full fixed-size name buffer (GxB_MAX_NAME_LEN bytes), not just
// up to the NUL, so the caller's buffer is fully defined
memcpy (type_name, unaryop->ztype->name, GxB_MAX_NAME_LEN) ;
// NOTE(review): presumably the flush makes the copied name visible to
// other threads before returning -- confirm against GraphBLAS conventions
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
munit.c | /* Copyright (c) 2013-2017 Evan Nemerson <evan@nemerson.com>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*** Configuration ***/
/* This is just where the output from the test goes. It's really just
* meant to let you choose stdout or stderr, but if anyone really want
* to direct it to a file let me know, it would be fairly easy to
* support. */
#if !defined(MUNIT_OUTPUT_FILE)
# define MUNIT_OUTPUT_FILE stdout
#endif
/* This is a bit more useful; it tells µnit how to format the seconds in
* timed tests. If your tests run for longer you might want to reduce
* it, and if your computer is really fast and your tests are tiny you
* can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
# define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif
/* If you have long test names you might want to consider bumping
* this. The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
# define MUNIT_TEST_NAME_LEN 37
#endif
/* If you don't like the timing information, you can disable it by
* defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
# define MUNIT_ENABLE_TIMING
#endif
/*** End configuration ***/
#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
# undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
# define _POSIX_C_SOURCE 200809L
#endif
/* Solaris freaks out if you try to use a POSIX or SUS standard without
* the "right" C standard. */
#if defined(_XOPEN_SOURCE)
# undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
# if __STDC_VERSION__ >= 201112L
# define _XOPEN_SOURCE 700
# elif __STDC_VERSION__ >= 199901L
# define _XOPEN_SOURCE 600
# endif
#endif
/* Because, according to Microsoft, POSIX is deprecated. You've got
* to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
# define _CRT_NONSTDC_NO_DEPRECATE
#endif
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif
#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>
#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif
#if !defined(_WIN32)
# include <unistd.h>
# include <sys/types.h>
# include <sys/wait.h>
#else
# include <windows.h>
# include <io.h>
# include <fcntl.h>
# if !defined(STDERR_FILENO)
# define STDERR_FILENO _fileno(stderr)
# endif
#endif
#include "munit.h"
#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
# define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
# define MUNIT_THREAD_LOCAL __thread
#elif defined(_WIN32)
# define MUNIT_THREAD_LOCAL __declspec(thread)
#endif
/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
* while (0)', or 'do { ... } while (true)'. I'm pretty sure nobody
* at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif
#if defined(_WIN32) || defined(__EMSCRIPTEN__)
# define MUNIT_NO_FORK
#endif
#if defined(__EMSCRIPTEN__)
# define MUNIT_NO_BUFFER
#endif
/*** Logging ***/
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;
#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL bool munit_error_jmp_buf_valid = false;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif
/* At certain warning levels, mingw will trigger warnings about
* suggesting the format attribute, which we've explicity *not* set
* because it will then choke on our attempts to use the MS-specific
* I64 modifier for size_t (which we have to use since MSVC doesn't
* support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif
/* Core log sink: writes "<Level>: [file:line: ]message\n" to fp, dropping
 * messages below the visible level.  An unknown level is reported through
 * munit_logf_ex at ERROR severity (which may abort/longjmp) and nothing
 * else is printed. */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
if (level < munit_log_level_visible)
return;
switch (level) {
case MUNIT_LOG_DEBUG:
fputs("Debug", fp);
break;
case MUNIT_LOG_INFO:
fputs("Info", fp);
break;
case MUNIT_LOG_WARNING:
fputs("Warning", fp);
break;
case MUNIT_LOG_ERROR:
fputs("Error", fp);
break;
default:
munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
return;
}
fputs(": ", fp);
/* filename is optional; when present the location prefix is added */
if (filename != NULL)
fprintf(fp, "%s:%d: ", filename, line);
vfprintf(fp, format, ap);
fputc('\n', fp);
}
/* Internal printf-style logger: forwards to munit_logf_exv with no
 * filename/line context. */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list args;

  va_start(args, format);
  munit_logf_exv(level, fp, NULL, 0, format, args);
  va_end(args);
}
/* Log a plain (pre-formatted) message; the "%s" keeps any '%' in the
 * message from being interpreted as a format specifier. */
static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}
/* Public printf-style logger with file/line context.  Messages at or
 * above the fatal level terminate the current test: longjmp back into
 * the harness when a jump buffer is armed, otherwise abort(). */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list args;

  va_start(args, format);
  munit_logf_exv(level, stderr, filename, line, format, args);
  va_end(args);

  if (level < munit_log_level_fatal)
    return;

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}
/* Report a test error at filename:line and terminate the current test
 * (longjmp into the harness when possible, otherwise abort()). */
void
munit_errorf_ex(const char* filename, int line, const char* format, ...) {
  va_list args;

  va_start(args, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, args);
  va_end(args);

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}
#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif
#if !defined(MUNIT_STRERROR_LEN)
# define MUNIT_STRERROR_LEN 80
#endif
/* Log msg followed by a textual description of the current errno.
 * Uses a reentrant strerror variant where one is available. */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
/* No thread-safe strerror available: fall back to plain strerror(). */
munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
char munit_error_str[MUNIT_STRERROR_LEN];
munit_error_str[0] = '\0';
#if !defined(_WIN32)
strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif
munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}
/*** Memory allocation ***/
/* Allocate `size` zero-initialized bytes, logging (and thereby
 * aborting the test via munit_logf_ex's fatal handling) on failure.
 * A size of 0 returns NULL without logging. */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr = NULL;

  if (size != 0) {
    ptr = calloc(1, size);
    if (MUNIT_UNLIKELY(ptr == NULL))
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}
/*** Timer code ***/
#if defined(MUNIT_ENABLE_TIMING)
#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t
/* Code copied from portable-snippets
* <https://github.com/nemequ/portable-snippets/>. If you need to
* change something, please do it there so we can keep the code in
* sync. */
/* Clocks (v1)
* Portable Snippets - https://github.com/nemequ/portable-snippets
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* https://creativecommons.org/publicdomain/zero/1.0/
*/
#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H
#if !defined(psnip_uint64_t)
# include "../exact-int/exact-int.h"
#endif
#if !defined(PSNIP_CLOCK_STATIC_INLINE)
# if defined(__GNUC__)
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
# else
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES
# endif
# define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif
enum PsnipClockType {
/* This clock provides the current time, in units since 1970-01-01
* 00:00:00 UTC not including leap seconds. In other words, UNIX
* time. Keep in mind that this clock doesn't account for leap
* seconds, and can go backwards (think NTP adjustments). */
PSNIP_CLOCK_TYPE_WALL = 1,
/* The CPU time is a clock which increases only when the current
* process is active (i.e., it doesn't increment while blocking on
* I/O). */
PSNIP_CLOCK_TYPE_CPU = 2,
/* Monotonic time is always running (unlike CPU time), but it only
ever moves forward unless you reboot the system. Things like NTP
adjustments have no effect on this clock. */
PSNIP_CLOCK_TYPE_MONOTONIC = 3
};
struct PsnipClockTimespec {
psnip_uint64_t seconds;
psnip_uint64_t nanoseconds;
};
/* Methods we support: */
#define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1
#define PSNIP_CLOCK_METHOD_TIME 2
#define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3
#define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4
#define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5
#define PSNIP_CLOCK_METHOD_CLOCK 6
#define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7
#define PSNIP_CLOCK_METHOD_GETRUSAGE 8
#define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9
#define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10
#include <assert.h>
#if defined(HEDLEY_UNREACHABLE)
# define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE()
#else
# define PSNIP_CLOCK_UNREACHABLE() assert(0)
#endif
/* Choose an implementation */
/* #undef PSNIP_CLOCK_WALL_METHOD */
/* #undef PSNIP_CLOCK_CPU_METHOD */
/* #undef PSNIP_CLOCK_MONOTONIC_METHOD */
/* We want to be able to detect the libc implementation, so we include
<limits.h> (<features.h> isn't available everywhere). */
#if defined(__unix__) || defined(__unix) || defined(__linux__)
# include <limits.h>
# include <unistd.h>
#endif
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)
/* These are known to work without librt. If you know of others
* please let us know so we can add them. */
# if \
(defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \
(defined(__FreeBSD__))
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# elif !defined(PSNIP_CLOCK_NO_LIBRT)
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# endif
#endif
#if defined(_WIN32)
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
# endif
#endif
#if defined(__MACH__) && !defined(__gnu_hurd__)
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
# endif
#endif
#if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME)
# include <time.h>
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# if defined(CLOCK_REALTIME_PRECISE)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE
# elif !defined(__sun)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME
# endif
# endif
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID
# elif defined(CLOCK_VIRTUAL)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL
# endif
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# if defined(CLOCK_MONOTONIC_RAW)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# elif defined(CLOCK_MONOTONIC_PRECISE)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE
# elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# endif
# endif
#endif
#if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L)
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY
# endif
#endif
#if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME
#endif
#if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK
#endif
/* Primarily here for testing. */
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC)
# error No monotonic clock found.
#endif
/* Implementations */
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME))
# include <time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY))
# include <sys/time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64))
# include <windows.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE))
# include <sys/time.h>
# include <sys/resource.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME))
# include <CoreServices/CoreServices.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
#endif
/*** Implementations ***/
#define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL))
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME))
/* Query the resolution of clk_id and convert it to ticks per second.
 * Returns 0 if the clock's resolution cannot be queried. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock__clock_getres (clockid_t clk_id) {
  struct timespec res;
  int r;

  r = clock_getres(clk_id, &res);
  if (r != 0)
    return 0;

  /* A clock whose resolution has no sub-second component (tv_nsec == 0,
   * e.g. one-second granularity) would make the division below divide
   * by zero; report it as 1 tick per second instead. */
  if (res.tv_nsec == 0)
    return 1;

  return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec);
}
/* Read clk_id into res.  Returns 0 on success, -10 when
 * clock_gettime() fails. */
PSNIP_CLOCK__FUNCTION int
psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) {
  struct timespec now;

  if (clock_gettime(clk_id, &now) != 0)
    return -10;

  res->seconds = (psnip_uint64_t) (now.tv_sec);
  res->nanoseconds = (psnip_uint64_t) (now.tv_nsec);

  return 0;
}
#endif
/* Ticks per second of the wall clock, or 0 when no wall-clock method
 * was selected at preprocessing time. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_wall_get_precision (void) {
#if !defined(PSNIP_CLOCK_WALL_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
/* gettimeofday() has microsecond granularity. */
return 1000000;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
/* time() has one-second granularity. */
return 1;
#else
return 0;
#endif
}
/* Read the wall clock into res.  Returns 0 on success, a negative
 * method-specific code on failure, -2 when no method is available. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_wall_get_time (struct PsnipClockTimespec* res) {
(void) res;
#if !defined(PSNIP_CLOCK_WALL_METHOD)
return -2;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
res->seconds = time(NULL);
res->nanoseconds = 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
struct timeval tv;
if (gettimeofday(&tv, NULL) != 0)
return -6;
res->seconds = tv.tv_sec;
res->nanoseconds = tv.tv_usec * 1000;
#else
return -2;
#endif
return 0;
}
/* Ticks per second of the per-process CPU clock, or 0 when no CPU
 * clock method is available. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_cpu_get_precision (void) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
return CLOCKS_PER_SEC;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
/* GetProcessTimes reports in 100 ns units. */
return PSNIP_CLOCK_NSEC_PER_SEC / 100;
#else
return 0;
#endif
}
/* Read the per-process CPU clock into res.  Returns 0 on success or a
 * negative method-specific error code; -2 when no method exists. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
  clock_t t = clock();
  if (t == ((clock_t) -1))
    return -5;
  res->seconds = t / CLOCKS_PER_SEC;
  res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
  FILETIME CreationTime, ExitTime, KernelTime, UserTime;
  LARGE_INTEGER date, adjust;
  if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime))
    return -7;
  /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */
  date.HighPart = UserTime.dwHighDateTime;
  date.LowPart = UserTime.dwLowDateTime;
  adjust.QuadPart = 11644473600000 * 10000;
  date.QuadPart -= adjust.QuadPart;
  res->seconds = date.QuadPart / 10000000;
  res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100);
#elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0)
    return -8;
  res->seconds = usage.ru_utime.tv_sec;
  /* Fix: the original referenced an undeclared `tv`; the microsecond
   * component comes from the rusage user-time field. */
  res->nanoseconds = usage.ru_utime.tv_usec * 1000;
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Ticks per second of the monotonic clock, or 0 when no monotonic
 * clock method is available. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_monotonic_get_precision (void) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
/* Cache the timebase; the first call populates it.
 * NOTE(review): numer/denom here uses integer division, which
 * truncates non-integral ratios -- confirm acceptable on Mach. */
static mach_timebase_info_data_t tbi = { 0, };
if (tbi.denom == 0)
mach_timebase_info(&tbi);
return (psnip_uint32_t) (tbi.numer / tbi.denom);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
/* GetTickCount64 has millisecond granularity. */
return 1000;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
LARGE_INTEGER Frequency;
QueryPerformanceFrequency(&Frequency);
/* Clamp to nanosecond precision. */
return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart);
#else
return 0;
#endif
}
/* Read the monotonic clock into res.  Returns 0 on success or a
 * negative method-specific error code; -2 when no method exists. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  psnip_uint64_t nsec = mach_absolute_time();
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  /* Fix: multiply before dividing.  numer/denom is a rational scale
   * factor and the original `nsec *= numer / denom` truncated the
   * ratio to an integer before applying it. */
  nsec = (nsec * ((psnip_uint64_t) tbi.numer)) / ((psnip_uint64_t) tbi.denom);
  res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC;
  res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER t, f;
  if (QueryPerformanceCounter(&t) == 0)
    return -12;
  QueryPerformanceFrequency(&f);
  res->seconds = t.QuadPart / f.QuadPart;
  res->nanoseconds = t.QuadPart % f.QuadPart;
  if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC)
    res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC;
  else
    res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  const ULONGLONG msec = GetTickCount64();
  res->seconds = msec / 1000;
  /* Fix: the original used the undeclared identifier `sec` and stored
   * the remainder in milliseconds; convert the sub-second part of
   * `msec` to nanoseconds. */
  res->nanoseconds = (msec % 1000) * (PSNIP_CLOCK_NSEC_PER_SEC / 1000);
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Returns the number of ticks per second for the specified clock.
* For example, a clock with millisecond precision would return 1000,
* and a clock with 1 second (such as the time() function) would
* return 1.
*
* If the requested clock isn't available, it will return 0.
* Hopefully this will be rare, but if it happens to you please let us
* know so we can work on finding a way to support your system.
*
* Note that different clocks on the same system often have a
* different precisions.
*/
/* Number of ticks per second for the requested clock, or 0 when the
 * clock is not supported on this platform. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_get_precision (enum PsnipClockType clock_type) {
  if (clock_type == PSNIP_CLOCK_TYPE_MONOTONIC)
    return psnip_clock_monotonic_get_precision ();
  if (clock_type == PSNIP_CLOCK_TYPE_CPU)
    return psnip_clock_cpu_get_precision ();
  if (clock_type == PSNIP_CLOCK_TYPE_WALL)
    return psnip_clock_wall_get_precision ();

  PSNIP_CLOCK_UNREACHABLE();
  return 0;
}
/* Set the provided timespec to the requested time. Returns 0 on
* success, or a negative value on failure. */
/* Read the requested clock into res.  Returns 0 on success, a
 * negative value on failure, and -1 for an unknown clock type. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  if (clock_type == PSNIP_CLOCK_TYPE_MONOTONIC)
    return psnip_clock_monotonic_get_time (res);
  if (clock_type == PSNIP_CLOCK_TYPE_CPU)
    return psnip_clock_cpu_get_time (res);
  if (clock_type == PSNIP_CLOCK_TYPE_WALL)
    return psnip_clock_wall_get_time (res);

  return -1;
}
#endif /* !defined(PSNIP_CLOCK_H) */
/* Nanoseconds elapsed between start and end, with a borrow when the
 * end's nanosecond field is smaller than the start's. */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t elapsed = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;

  if (end->nanoseconds >= start->nanoseconds)
    elapsed += end->nanoseconds - start->nanoseconds;
  else
    elapsed -= start->nanoseconds - end->nanoseconds;

  return elapsed;
}
#endif /* defined(MUNIT_ENABLE_TIMING) */
/*** PRNG stuff ***/
/* This is (unless I screwed up, which is entirely possible) the
* version of PCG with 32-bit state. It was chosen because it has a
* small enough state that we should reliably be able to use CAS
* instead of requiring a lock for thread-safety.
*
* If I did screw up, I probably will not bother changing it unless
* there is a significant bias. It's really not important this be
* particularly strong, as long as it is fairly random it's much more
* important that it be reproducible, so bug reports have a better
* chance of being reproducible. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__)
# define HAVE_STDATOMIC
#elif defined(__clang__)
# if __has_extension(c_atomic)
# define HAVE_CLANG_ATOMICS
# endif
#endif
/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
# undef HAVE_STDATOMIC
# if defined(__c2__)
# undef HAVE_CLANG_ATOMICS
# endif
#endif
#if defined(_OPENMP)
# define ATOMIC_UINT32_T uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
# include <stdatomic.h>
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
# define ATOMIC_UINT32_T volatile LONG
# define ATOMIC_UINT32_INIT(x) (x)
#else
# define ATOMIC_UINT32_T volatile uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#endif
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);
#if defined(_OPENMP)
/* OpenMP-based fallbacks: serialize all accesses to the shared PRNG
 * state through one named critical section. */
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
#pragma omp critical (munit_atomics)
  *dest = value;
}
static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  /* Fix: use uint32_t (not int) so values above INT_MAX survive the
   * round-trip without implementation-defined conversion. */
  uint32_t ret;
#pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}
static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  bool ret;
#pragma omp critical (munit_atomics)
  {
    if (*dest == *expected) {
      *dest = desired;
      ret = true;
    } else {
      ret = false;
    }
  }
  return ret;
}
#elif defined(HAVE_STDATOMIC)
# define munit_atomic_store(dest, value) atomic_store(dest, value)
# define munit_atomic_load(src) atomic_load(src)
# define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
# define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
# define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
# warning No atomic implementation, PRNG will not be thread-safe
# define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
/* Non-atomic fallback compare-and-swap (single-threaded use only; see
 * the #warning above). */
static inline bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
if (*dest == *expected) {
*dest = desired;
return true;
} else {
return false;
}
}
#endif
#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)
/* Advance the 32-bit LCG underlying the PCG generator by one step. */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  return (state * MUNIT_PRNG_MULTIPLIER) + MUNIT_PRNG_INCREMENT;
}
/* PCG output permutation: scramble a raw LCG state into the value
 * actually handed to callers. */
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  munit_uint32_t r = state;
  r = ((r >> ((r >> 28) + 4)) ^ r) * (277803737U);
  r ^= r >> 22;
  return r;
}
/* Seed the shared PRNG state from a user-supplied 32-bit seed. */
void
munit_rand_seed(munit_uint32_t seed) {
  munit_atomic_store(&munit_rand_state, munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT));
}
static munit_uint32_t
munit_rand_generate_seed(void) {
struct PsnipClockTimespec wc;
munit_uint32_t seed, state;
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
seed = (munit_uint32_t) wc.nanoseconds;
state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
return munit_rand_from_state(state);
}
/* Produce one 32-bit value from *state and advance the state. */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t current = *state;
  *state = munit_rand_next_state(current);
  return munit_rand_from_state(current);
}
/* Thread-safe 32-bit random value: advance the shared state with a
 * CAS loop, retrying if another thread updated it concurrently. */
munit_uint32_t
munit_rand_uint32(void) {
  munit_uint32_t prev, next;

  do {
    prev = munit_atomic_load(&munit_rand_state);
    next = munit_rand_next_state(prev);
  } while (!munit_atomic_cas(&munit_rand_state, &prev, next));

  return munit_rand_from_state(prev);
}
/* Fill `data` with `size` random bytes drawn from *state: whole
 * 32-bit words first, then one extra word for any trailing bytes. */
static void
munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  const size_t whole_words = size / sizeof(munit_uint32_t);
  const size_t trailing = size % sizeof(munit_uint32_t);
  munit_uint8_t* out = data;
  munit_uint32_t word;
  size_t i;

  for (i = 0 ; i < whole_words ; i++) {
    word = munit_rand_state_uint32(state);
    memcpy(out, &word, sizeof(word));
    out += sizeof(word);
  }

  if (trailing != 0) {
    word = munit_rand_state_uint32(state);
    memcpy(out, &word, trailing);
  }
}
/* Thread-safe random fill: run the buffer generator on a local copy of
 * the state and commit it with CAS, retrying (and regenerating the
 * buffer) on contention. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint32_t prev, next;

  do {
    prev = next = munit_atomic_load(&munit_rand_state);
    munit_rand_state_memory(&next, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &prev, next));
}
/* Random value in [0, max] drawn from *state, salted with `salt`.
 * Uses rejection sampling to avoid modulo bias. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t min, x;

  /* Fix: with max == 0 the only representable result is 0, and the
   * `% max` below would otherwise divide by zero (reachable via
   * munit_rand_int_range(a, a)). */
  if (max == 0)
    return 0;

  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
   * as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max using not
   * to avoid compiler warnings.
   */
  min = (~max + 1U) % max;
  max++;
  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);
  return x % max;
}
/* Thread-safe wrapper around munit_rand_state_at_most: CAS-commit the
 * advanced state, retrying on contention. */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t prev, next;
  munit_uint32_t result;

  do {
    prev = next = munit_atomic_load(&munit_rand_state);
    result = munit_rand_state_at_most(&next, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &prev, next));

  return result;
}
int
munit_rand_int_range(int min, int max) {
munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;
if (min > max)
return munit_rand_int_range(max, min);
if (range > (~((munit_uint32_t) 0U)))
range = (~((munit_uint32_t) 0U));
return min + munit_rand_at_most(0, (munit_uint32_t) range);
}
/* Random double in [0, 1).  See
 * http://mumble.net/~campbell/tmp/random_real.c for how to do this
 * right; patches welcome if you feel this is too biased. */
double
munit_rand_double(void) {
  munit_uint32_t prev, next;
  double result = 0.0;

  do {
    prev = next = munit_atomic_load(&munit_rand_state);
    result = munit_rand_state_uint32(&next) / ((~((munit_uint32_t) 0U)) + 1.0);
  } while (!munit_atomic_cas(&munit_rand_state, &prev, next));

  return result;
}
/*** Test suite handling ***/
/* Tally of test outcomes for a run (plus accumulated clock time of
 * successful iterations when timing is enabled). */
typedef struct {
unsigned int successful;
unsigned int skipped;
unsigned int failed;
unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
/* Nanoseconds, summed by munit_test_runner_exec. */
munit_uint64_t cpu_clock;
munit_uint64_t wall_clock;
#endif
} MunitReport;
/* State shared across a whole suite run: configuration from the
 * command line plus the aggregate report. */
typedef struct {
const char* prefix;
const MunitSuite* suite;
const char** tests;
munit_uint32_t seed;
unsigned int iterations;
MunitParameter* parameters;
bool single_parameter_mode;
void* user_data;
MunitReport report;
bool colorize;
bool fork;
bool show_stderr;
bool fatal_failures;
} MunitTestRunner;
/* Look up `key` in a NULL-name-terminated parameter array; returns the
 * associated value, or NULL when the key is absent. */
const char*
munit_parameters_get(const MunitParameter params[], const char* key) {
  const MunitParameter* p = params;

  while (p != NULL && p->name != NULL) {
    if (strcmp(p->name, key) == 0)
      return p->value;
    p++;
  }

  return NULL;
}
/* Print a nanosecond duration as (fractional) seconds using the
 * configured time format. */
static void
munit_print_time(FILE* fp, munit_uint64_t nanoseconds) {
  const double seconds = ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC);
  fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, seconds);
}
/* Add a parameter to an array of parameters. */
/* Append a name/value pair to a NULL-terminated parameter array,
 * growing it by one entry (plus the terminator).
 * Returns MUNIT_OK on success, MUNIT_ERROR on allocation failure. */
static MunitResult
munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) {
  /* Fix: grow through a temporary so the original array is not leaked
   * (and not lost to the caller) when realloc fails. */
  MunitParameter* tmp = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
  if (tmp == NULL)
    return MUNIT_ERROR;
  *params = tmp;

  (*params)[*params_size].name = name;
  (*params)[*params_size].value = value;
  (*params_size)++;
  /* Keep the array NULL-terminated. */
  (*params)[*params_size].name = NULL;
  (*params)[*params_size].value = NULL;

  return MUNIT_OK;
}
/* Concatenate two strings, but just return one of the components
* unaltered if the other is NULL or "". */
/* Concatenate two strings, but just return one of the components
 * unaltered if the other is NULL or "".  A heap allocation happens
 * only when both parts are non-empty; pair with
 * munit_maybe_free_concat to release it.  `len`, when non-NULL,
 * receives the result length (0 for a NULL result). */
static char*
munit_maybe_concat(size_t* len, char* prefix, char* suffix) {
  char* res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;
  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    /* Fix: the original passed an unchecked malloc result straight to
     * memcpy; on OOM report "no string" instead of crashing. */
    if (res == NULL) {
      res_l = 0;
    } else {
      memcpy(res, prefix, prefix_l);
      memcpy(res + prefix_l, suffix, suffix_l);
      res[res_l] = 0;
    }
  }
  if (len != NULL)
    *len = res_l;
  return res;
}
/* Possibly free a string returned by munit_maybe_concat. */
/* Free a string produced by munit_maybe_concat, but only when it was
 * actually heap-allocated (i.e. it is neither of the two inputs). */
static void
munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) {
  const int heap_allocated = (s != prefix) && (s != suffix);
  if (heap_allocated)
    free(s);
}
/* Cheap string hash function, just used to salt the PRNG. */
/* djb2-style string hash (h = h*33 + c), used only to salt the PRNG
 * per test name. */
static munit_uint32_t
munit_str_hash(const char* name) {
  munit_uint32_t h = 5381U;
  const char* c;

  for (c = name ; *c != '\0' ; c++)
    h = (h * 33) + *c;

  return h;
}
/* Copy everything readable from fd `from` to fd `to` until EOF or a
 * read/write error; used to forward a test's buffered output. */
static void
munit_splice(int from, int to) {
munit_uint8_t buf[1024];
#if !defined(_WIN32)
ssize_t len;
ssize_t bytes_written;
ssize_t write_res;
#else
/* Windows read()/write() take and return int. */
int len;
int bytes_written;
int write_res;
#endif
do {
len = read(from, buf, sizeof(buf));
if (len > 0) {
/* Short writes are possible; loop until the chunk is fully written. */
bytes_written = 0;
do {
write_res = write(to, buf + bytes_written, len - bytes_written);
if (write_res < 0)
break;
bytes_written += write_res;
} while (bytes_written < len);
}
else
break;
} while (true);
}
/* This is the part that should be handled in the child process */
/* Run one test (possibly for several iterations) in the current
 * process, accumulating outcome counts and timing into `report`.
 * Returns the result of the last iteration executed. */
static MunitResult
munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) {
unsigned int iterations = runner->iterations;
MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
struct PsnipClockTimespec wall_clock_begin, wall_clock_end;
struct PsnipClockTimespec cpu_clock_begin, cpu_clock_end;
#endif
unsigned int i = 0;
/* Single-iteration tests always run once; otherwise fall back to the
 * suite's iteration count when the runner does not specify one. */
if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
iterations = 1;
else if (iterations == 0)
iterations = runner->suite->iterations;
/* Re-seed so each test sees a reproducible PRNG sequence. */
munit_rand_seed(runner->seed);
do {
/* Per-iteration fixture: setup (when provided) replaces user_data. */
void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data);
#if defined(MUNIT_ENABLE_TIMING)
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin);
psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin);
#endif
result = test->test(params, data);
#if defined(MUNIT_ENABLE_TIMING)
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end);
psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end);
#endif
if (test->tear_down != NULL)
test->tear_down(data);
if (MUNIT_LIKELY(result == MUNIT_OK)) {
report->successful++;
#if defined(MUNIT_ENABLE_TIMING)
/* Only successful iterations contribute to the timing totals. */
report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end);
report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end);
#endif
} else {
switch ((int) result) {
case MUNIT_SKIP:
report->skipped++;
break;
case MUNIT_FAIL:
report->failed++;
break;
case MUNIT_ERROR:
report->errored++;
break;
default:
break;
}
/* Any non-OK result stops further iterations. */
break;
}
} while (++i < iterations);
return result;
}
#if defined(MUNIT_EMOTICON)
# define MUNIT_RESULT_STRING_OK ":)"
# define MUNIT_RESULT_STRING_SKIP ":|"
# define MUNIT_RESULT_STRING_FAIL ":("
# define MUNIT_RESULT_STRING_ERROR ":o"
# define MUNIT_RESULT_STRING_TODO ":/"
#else
# define MUNIT_RESULT_STRING_OK "OK "
# define MUNIT_RESULT_STRING_SKIP "SKIP "
# define MUNIT_RESULT_STRING_FAIL "FAIL "
# define MUNIT_RESULT_STRING_ERROR "ERROR"
# define MUNIT_RESULT_STRING_TODO "TODO "
#endif
/* Write `string` to the output stream, wrapped in an ANSI foreground-color
 * escape sequence when colorized output is enabled for this runner. */
static void
munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) {
  if (!runner->colorize) {
    fputs(string, MUNIT_OUTPUT_FILE);
    return;
  }
  /* "\x1b[3Xm" selects color X; "\x1b[39m" restores the default. */
  fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string);
}
#if !defined(MUNIT_NO_BUFFER)
/* Redirect STDERR_FILENO into the buffer file.  Returns a dup of the
 * original stderr fd so it can later be passed to munit_restore_stderr(),
 * or -1 when no buffer was supplied (in which case nothing changes). */
static int
munit_replace_stderr(FILE* stderr_buf) {
  int saved;
  int buf_fd;

  if (stderr_buf == NULL)
    return -1;

  saved = dup(STDERR_FILENO);
  buf_fd = fileno(stderr_buf);
  if (MUNIT_UNLIKELY(buf_fd == -1)) {
    /* Can't obtain an fd for the buffer; stderr can't be captured. */
    exit(EXIT_FAILURE);
  }
  dup2(buf_fd, STDERR_FILENO);

  return saved;
}
/* Undo munit_replace_stderr(): point STDERR_FILENO back at the saved fd
 * and close the temporary duplicate.  -1 means stderr was never replaced,
 * so there is nothing to do. */
static void
munit_restore_stderr(int orig_stderr) {
  if (orig_stderr == -1)
    return;
  dup2(orig_stderr, STDERR_FILENO);
  close(orig_stderr);
}
#endif /* !defined(MUNIT_NO_BUFFER) */
/* Run a test with the specified parameters.  Prints the parameter column,
 * executes the test (in a forked child when runner->fork is set, otherwise
 * in-process), classifies the resulting MunitReport into a single printed
 * result, folds the counters into the runner-wide totals, and replays the
 * test's buffered stderr when it failed (or --show-stderr was given). */
static void
munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) {
  MunitResult result = MUNIT_OK;
  /* Per-invocation tally of iteration outcomes (plus timing, if built). */
  MunitReport report = {
    0, 0, 0, 0,
#if defined(MUNIT_ENABLE_TIMING)
    0, 0
#endif
  };
  unsigned int output_l;
  bool first;
  const MunitParameter* param;
  FILE* stderr_buf; /* temp file buffering the test's stderr output */
#if !defined(MUNIT_NO_FORK)
  int pipefd[2];    /* child -> parent channel carrying the raw MunitReport */
  pid_t fork_pid;
  int orig_stderr;
  ssize_t bytes_written = 0;
  ssize_t write_res;
  ssize_t bytes_read = 0;
  ssize_t read_res;
  int status = 0;
  pid_t changed_pid;
#endif

  if (params != NULL) {
    /* Print "name=value, ..." for this run, then pad the line out to the
     * result column (MUNIT_TEST_NAME_LEN). */
    output_l = 2;
    fputs(" ", MUNIT_OUTPUT_FILE);
    first = true;
    for (param = params ; param != NULL && param->name != NULL ; param++) {
      if (!first) {
        fputs(", ", MUNIT_OUTPUT_FILE);
        output_l += 2;
      } else {
        first = false;
      }
      output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value);
    }
    while (output_l++ < MUNIT_TEST_NAME_LEN) {
      fputc(' ', MUNIT_OUTPUT_FILE);
    }
  }

  fflush(MUNIT_OUTPUT_FILE);

  /* Buffer stderr in a temporary file so it can be replayed only when
   * needed. */
  stderr_buf = NULL;
#if !defined(_WIN32) || defined(__MINGW32__)
  stderr_buf = tmpfile();
#else
  tmpfile_s(&stderr_buf);
#endif
  if (stderr_buf == NULL) {
    munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr");
    result = MUNIT_ERROR;
    goto print_result;
  }

#if !defined(MUNIT_NO_FORK)
  if (runner->fork) {
    /* Forking mode: run the test in a child so crashes don't take down the
     * runner; the child ships its MunitReport back through a pipe. */
    pipefd[0] = -1;
    pipefd[1] = -1;
    if (pipe(pipefd) != 0) {
      munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe");
      result = MUNIT_ERROR;
      goto print_result;
    }

    fork_pid = fork();
    if (fork_pid == 0) {
      /* Child: capture stderr, run the test, write the raw report struct
       * into the pipe (handling short writes), then exit. */
      close(pipefd[0]);

      orig_stderr = munit_replace_stderr(stderr_buf);
      munit_test_runner_exec(runner, test, params, &report);

      /* Note that we don't restore stderr. This is so we can buffer
       * things written to stderr later on (such as by
       * asan/tsan/ubsan, valgrind, etc.) */
      close(orig_stderr);

      do {
        write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written);
        if (write_res < 0) {
          if (stderr_buf != NULL) {
            munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe");
          }
          exit(EXIT_FAILURE);
        }
        bytes_written += write_res;
      } while ((size_t) bytes_written < sizeof(report));

      if (stderr_buf != NULL)
        fclose(stderr_buf);
      close(pipefd[1]);
      exit(EXIT_SUCCESS);
    } else if (fork_pid == -1) {
      /* fork() itself failed; count the test as errored. */
      close(pipefd[0]);
      close(pipefd[1]);
      if (stderr_buf != NULL) {
        munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork");
      }
      report.errored++;
      result = MUNIT_ERROR;
    } else {
      /* Parent: read the report back, then reap the child and inspect how
       * it terminated. */
      close(pipefd[1]);
      do {
        read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read);
        if (read_res < 1)
          break;
        bytes_read += read_res;
      } while (bytes_read < (ssize_t) sizeof(report));

      changed_pid = waitpid(fork_pid, &status, 0);

      if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) {
        if (bytes_read != sizeof(report)) {
          /* Short read: the child died before shipping a full report. */
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status));
          report.errored++;
        } else if (WEXITSTATUS(status) != EXIT_SUCCESS) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status));
          report.errored++;
        }
      } else {
        /* Child was killed or stopped by a signal (e.g. a crashing test). */
        if (WIFSIGNALED(status)) {
#if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status)));
#else
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status));
#endif
        } else if (WIFSTOPPED(status)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status));
        }
        report.errored++;
      }

      close(pipefd[0]);
      waitpid(fork_pid, NULL, 0);
    }
  } else
#endif
  {
    /* Non-forking mode: run the test in this process.  With thread-local
     * storage available, a failed assertion longjmp()s back here. */
#if !defined(MUNIT_NO_BUFFER)
    const volatile int orig_stderr = munit_replace_stderr(stderr_buf);
#endif

#if defined(MUNIT_THREAD_LOCAL)
    if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) {
      result = MUNIT_FAIL;
      report.failed++;
    } else {
      munit_error_jmp_buf_valid = true;
      result = munit_test_runner_exec(runner, test, params, &report);
    }
#else
    result = munit_test_runner_exec(runner, test, params, &report);
#endif

#if !defined(MUNIT_NO_BUFFER)
    munit_restore_stderr(orig_stderr);
#endif

    /* Here just so that the label is used on Windows and we don't get
     * a warning */
    goto print_result;
  }

print_result:

  /* Translate the report counters into a single printed result and fold
   * them into the runner-wide totals. */
  fputs("[ ", MUNIT_OUTPUT_FILE);
  if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) {
    /* TODO tests are expected to fail; an unexpected success is an error. */
    if (report.failed != 0 || report.errored != 0 || report.skipped != 0) {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3');
      result = MUNIT_OK;
    } else {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
      if (MUNIT_LIKELY(stderr_buf != NULL))
        munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful.");
      runner->report.failed++;
      result = MUNIT_ERROR;
    }
  } else if (report.failed > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1');
    runner->report.failed++;
    result = MUNIT_FAIL;
  } else if (report.errored > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
    runner->report.errored++;
    result = MUNIT_ERROR;
  } else if (report.skipped > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3');
    runner->report.skipped++;
    result = MUNIT_SKIP;
  } else if (report.successful > 1) {
    /* Multiple successful iterations: print per-iteration averages as well
     * as totals (timing builds only). */
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful);
    fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", "");
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  } else if (report.successful > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  }
  fputs(" ]\n", MUNIT_OUTPUT_FILE);

  /* Replay buffered stderr for failing tests (or when requested). */
  if (stderr_buf != NULL) {
    if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) {
      fflush(MUNIT_OUTPUT_FILE);
      rewind(stderr_buf);
      munit_splice(fileno(stderr_buf), STDERR_FILENO);
      fflush(stderr);
    }
    fclose(stderr_buf);
  }
}
/* Recursively expand one wildcard parameter: for each possible value of
 * the parameter `p` points at, set it and either run the test (when `p` is
 * the last wildcard) or recurse into the next wildcard slot.  Stops early
 * on --fatal-failures once a failure/error has been recorded. */
static void
munit_test_runner_run_test_wild(MunitTestRunner* runner,
                                const MunitTest* test,
                                const char* test_name,
                                MunitParameter* params,
                                MunitParameter* p) {
  const MunitParameterEnum* pe;
  char** values;
  MunitParameter* next;

  /* Find the enum describing the possible values of this parameter.  The
   * name is compared by pointer because wildcard entries are created from
   * the same MunitParameterEnum array (see munit_test_runner_run_test). */
  for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
    if (p->name == pe->name)
      break;
  }

  /* FIX: when the name is not found the loop terminates with pe pointing
   * at the array terminator (pe->name == NULL) — pe itself is never NULL —
   * so the old `pe == NULL` check could not trigger and `pe->values` below
   * could be dereferenced as NULL.  Guard all three conditions. */
  if (pe == NULL || pe->name == NULL || pe->values == NULL)
    return;

  for (values = pe->values ; *values != NULL ; values++) {
    next = p + 1;
    p->value = *values;
    /* Last wildcard slot?  Run the test; otherwise recurse deeper. */
    if (next->name == NULL) {
      munit_test_runner_run_test_with_params(runner, test, params);
    } else {
      munit_test_runner_run_test_wild(runner, test, test_name, params, next);
    }
    if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
      break;
  }
}
/* Run a single test, with every combination of parameters requested.
 * CLI-supplied parameter values are fixed; remaining ("wildcard")
 * parameters are either expanded to every combination, or — with
 * --single — sampled once using the test name as a deterministic salt. */
static void
munit_test_runner_run_test(MunitTestRunner* runner,
                           const MunitTest* test,
                           const char* prefix) {
  char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter* params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI. That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter* wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum* pe;
  const MunitParameter* cli_p;
  bool filled;
  unsigned int possible;
  char** vals;
  size_t first_wild;
  const MunitParameter* wp;
  int pidx;

  munit_rand_seed(runner->seed);
  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);

  if (test->parameters == NULL) {
    /* No parameters. Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);

    for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = false;
      for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          /* FIX: the address-of expressions "&params_l"/"&params" (here
           * and in two calls below) had been corrupted into
           * "¶ms_l"/"¶ms" — a mangled "&para" HTML entity — which does
           * not compile.  Restored. */
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = true;
          break;
        }
      }
      if (filled)
        continue;

      /* Nothing from CLI, is the enum NULL/empty? We're not a
       * fuzzer… */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;

      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly. */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values ; *vals != NULL ; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation. Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }

    if (wild_params_l != 0) {
      /* Seed each wildcard slot with its first possible value, then let
       * munit_test_runner_run_test_wild() iterate the combinations. */
      first_wild = params_l;
      for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) {
        for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }

      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }

  cleanup:
    free(params);
    free(wild_params);
  }

  munit_maybe_free_concat(test_name, prefix, test->name);
}
/* Recurse through the suite and run all the tests. If a list of
 * tests to run was provided on the command line, run only those
 * tests. */
static void
munit_test_runner_run_suite(MunitTestRunner* runner,
                            const MunitSuite* suite,
                            const char* prefix) {
  size_t pre_l;
  /* Accumulated prefix for this suite level (outer prefix + suite prefix). */
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const char** test_name;
  const MunitSuite* child_suite;

  /* Run the tests. */
  for (test = suite->tests ; test != NULL && test->test != NULL ; test++) {
    if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
      for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) {
        /* Match when the requested name begins with this suite's prefix
         * and its remainder is a prefix of the test's name. */
        if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) &&
            strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
          munit_test_runner_run_test(runner, test, pre);
          if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
            goto cleanup;
        }
      }
    } else { /* Run all tests */
      munit_test_runner_run_test(runner, test, pre);
    }
  }

  if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
    goto cleanup;

  /* Run any child suites. */
  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_test_runner_run_suite(runner, child_suite, pre);
  }

cleanup:
  /* Presumably frees `pre` only if munit_maybe_concat actually allocated a
   * new string — confirm against munit_maybe_free_concat's definition. */
  munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Top-level entry for a configured runner: walk the whole suite tree
 * starting from the root suite with no name prefix. */
static void
munit_test_runner_run(MunitTestRunner* runner) {
  munit_test_runner_run_suite(runner, runner->suite, NULL);
}
/* Print the --help text: built-in options, the munit banner/version, and
 * the help for any user-supplied custom arguments. */
static void
munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) {
  const MunitArgument* arg;
  (void) argc;

  printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
  puts(" --seed SEED\n"
       " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n"
       " notation with no separators (commas, decimals, spaces, etc.), or\n"
       " hexidecimal prefixed by \"0x\".\n"
       " --iterations N\n"
       " Run each test N times. 0 means the default number.\n"
       " --param name value\n"
       " A parameter key/value pair which will be passed to any test with\n"
       " takes a parameter of that name. If not provided, the test will be\n"
       " run once for each possible parameter value.\n"
       " --list Write a list of all available tests.\n"
       " --list-params\n"
       " Write a list of all available tests and their possible parameters.\n"
       " --single Run each parameterized test in a single configuration instead of\n"
       " every possible combination\n"
       " --log-visible debug|info|warning|error\n"
       " --log-fatal debug|info|warning|error\n"
       " Set the level at which messages of different severities are visible,\n"
       " or cause the test to terminate.\n"
#if !defined(MUNIT_NO_FORK)
       " --no-fork Do not execute tests in a child process. If this option is supplied\n"
       " and a test crashes (including by failing an assertion), no further\n"
       " tests will be performed.\n"
#endif
       " --fatal-failures\n"
       " Stop executing tests as soon as a failure is found.\n"
       " --show-stderr\n"
       " Show data written to stderr by the tests, even if the test succeeds.\n"
       " --color auto|always|never\n"
       " Colorize (or don't) the output.\n"
       /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
       " --help Print this help message and exit.\n");
#if defined(MUNIT_NL_LANGINFO)
  /* Print "µnit" only when the locale's codeset can represent it. */
  setlocale(LC_ALL, "");
  fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? "µnit" : "munit", stdout);
#else
  puts("munit");
#endif
  /* Version is packed one byte per component: 0x00MMmmpp. */
  printf(" %d.%d.%d\n"
         "Full documentation at: https://nemequ.github.io/munit/\n",
         (MUNIT_CURRENT_VERSION >> 16) & 0xff,
         (MUNIT_CURRENT_VERSION >> 8) & 0xff,
         (MUNIT_CURRENT_VERSION >> 0) & 0xff);
  /* Let each custom argument print its own help section. */
  for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++)
    arg->write_help(arg, user_data);
}
/* Linear search of the user-supplied argument descriptors for one whose
 * name matches exactly.  Returns NULL when there is no match, or when no
 * descriptor array was provided at all. */
static const MunitArgument*
munit_arguments_find(const MunitArgument arguments[], const char* name) {
  const MunitArgument* cursor = arguments;

  while (cursor != NULL && cursor->name != NULL) {
    if (strcmp(cursor->name, name) == 0)
      return cursor;
    cursor++;
  }

  return NULL;
}
/* Print every test in `suite` (and, recursively, in its child suites),
 * each prefixed with the accumulated suite prefix.  When show_params is
 * true, also list each test parameter and its possible values ("Any" when
 * no value list is given). */
static void
munit_suite_list_tests(const MunitSuite* suite, bool show_params, const char* prefix) {
  size_t pre_l;
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const MunitParameterEnum* params;
  bool first;
  char** val;
  const MunitSuite* child_suite;

  for (test = suite->tests ;
       test != NULL && test->name != NULL ;
       test++) {
    if (pre != NULL)
      fputs(pre, stdout);
    puts(test->name);

    if (show_params) {
      for (params = test->parameters ;
           params != NULL && params->name != NULL ;
           params++) {
        fprintf(stdout, " - %s: ", params->name);
        if (params->values == NULL) {
          /* No value list means the parameter accepts arbitrary values. */
          puts("Any");
        } else {
          /* Comma-separated list of the declared values. */
          first = true;
          for (val = params->values ;
               *val != NULL ;
               val++ ) {
            if(!first) {
              fputs(", ", stdout);
            } else {
              first = false;
            }
            fputs(*val, stdout);
          }
          putc('\n', stdout);
        }
      }
    }
  }

  /* Recurse into child suites with the extended prefix. */
  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_suite_list_tests(child_suite, show_params, pre);
  }

  munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Heuristic: can `stream` render ANSI color escapes?  On POSIX any TTY is
 * assumed capable.  On Windows a TTY additionally needs the ANSICON
 * environment variable to be set (i.e. running under the ANSICON shim). */
static bool
munit_stream_supports_ansi(FILE *stream) {
#if !defined(_WIN32)
  return isatty(fileno(stream));
#else

#if !defined(__MINGW32__)
  size_t ansicon_size = 0;
#endif

  if (isatty(fileno(stream))) {
#if !defined(__MINGW32__)
    /* getenv_s with a NULL buffer just reports the value's length;
     * non-zero means ANSICON is set. */
    getenv_s(&ansicon_size, NULL, 0, "ANSICON");
    return ansicon_size != 0;
#else
    return getenv("ANSICON") != NULL;
#endif
  }
  return false;
#endif
}
int
munit_suite_main_custom(const MunitSuite* suite, void* user_data,
int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)],
const MunitArgument arguments[]) {
int result = EXIT_FAILURE;
MunitTestRunner runner;
size_t parameters_size = 0;
size_t tests_size = 0;
int arg;
char* envptr;
unsigned long ts;
char* endptr;
unsigned long long iterations;
MunitLogLevel level;
const MunitArgument* argument;
const char** runner_tests;
unsigned int tests_run;
unsigned int tests_total;
runner.prefix = NULL;
runner.suite = NULL;
runner.tests = NULL;
runner.seed = 0;
runner.iterations = 0;
runner.parameters = NULL;
runner.single_parameter_mode = false;
runner.user_data = NULL;
runner.report.successful = 0;
runner.report.skipped = 0;
runner.report.failed = 0;
runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
runner.report.cpu_clock = 0;
runner.report.wall_clock = 0;
#endif
runner.colorize = false;
#if !defined(_WIN32)
runner.fork = true;
#else
runner.fork = false;
#endif
runner.show_stderr = false;
runner.fatal_failures = false;
runner.suite = suite;
runner.user_data = user_data;
runner.seed = munit_rand_generate_seed();
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
for (arg = 1 ; arg < argc ; arg++) {
if (strncmp("--", argv[arg], 2) == 0) {
if (strcmp("seed", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
envptr = argv[arg + 1];
ts = strtoul(argv[arg + 1], &envptr, 0);
if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.seed = (munit_uint32_t) ts;
arg++;
} else if (strcmp("iterations", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
endptr = argv[arg + 1];
iterations = strtoul(argv[arg + 1], &endptr, 0);
if (*endptr != '\0' || iterations > UINT_MAX) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.iterations = (unsigned int) iterations;
arg++;
} else if (strcmp("param", argv[arg] + 2) == 0) {
if (arg + 2 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
goto cleanup;
}
runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
if (runner.parameters == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
runner.parameters[parameters_size].name = (char*) argv[arg + 1];
runner.parameters[parameters_size].value = (char*) argv[arg + 2];
parameters_size++;
runner.parameters[parameters_size].name = NULL;
runner.parameters[parameters_size].value = NULL;
arg += 2;
} else if (strcmp("color", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "always") == 0)
runner.colorize = true;
else if (strcmp(argv[arg + 1], "never") == 0)
runner.colorize = false;
else if (strcmp(argv[arg + 1], "auto") == 0)
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
arg++;
} else if (strcmp("help", argv[arg] + 2) == 0) {
munit_print_help(argc, argv, user_data, arguments);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("single", argv[arg] + 2) == 0) {
runner.single_parameter_mode = true;
} else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
runner.show_stderr = true;
#if !defined(_WIN32)
} else if (strcmp("no-fork", argv[arg] + 2) == 0) {
runner.fork = false;
#endif
} else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
runner.fatal_failures = true;
} else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
strcmp("log-fatal", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "debug") == 0)
level = MUNIT_LOG_DEBUG;
else if (strcmp(argv[arg + 1], "info") == 0)
level = MUNIT_LOG_INFO;
else if (strcmp(argv[arg + 1], "warning") == 0)
level = MUNIT_LOG_WARNING;
else if (strcmp(argv[arg + 1], "error") == 0)
level = MUNIT_LOG_ERROR;
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
if (strcmp("log-visible", argv[arg] + 2) == 0)
munit_log_level_visible = level;
else
munit_log_level_fatal = level;
arg++;
} else if (strcmp("list", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, false, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("list-params", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, true, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else {
argument = munit_arguments_find(arguments, argv[arg] + 2);
if (argument == NULL) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
goto cleanup;
}
if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
goto cleanup;
}
} else {
runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
if (runner_tests == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
runner.tests = runner_tests;
runner.tests[tests_size++] = argv[arg];
runner.tests[tests_size] = NULL;
}
}
fflush(stderr);
fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed);
munit_test_runner_run(&runner);
tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
tests_total = tests_run + runner.report.skipped;
if (tests_run == 0) {
fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
} else {
fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
runner.report.successful, tests_run,
(((double) runner.report.successful) / ((double) tests_run)) * 100.0,
runner.report.skipped,
(((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
}
if (runner.report.failed == 0 && runner.report.errored == 0) {
result = EXIT_SUCCESS;
}
cleanup:
free(runner.parameters);
free((void*) runner.tests);
return result;
}
/* Convenience wrapper around munit_suite_main_custom() for suites that
 * define no user-specific CLI arguments. */
int
munit_suite_main(const MunitSuite* suite, void* user_data,
                 int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
|
assumes_messages_attr.c | // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -fopenmp-version=51 -std=c99 -fms-extensions -fdouble-square-bracket-attributes -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -fopenmp-version=51 -std=c99 -fms-extensions -fdouble-square-bracket-attributes -Wno-pragma-pack %s
[[omp::directive(assumes)]]; // expected-error {{expected at least one 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism' clause for '#pragma omp assumes'}}
[[omp::directive(begin)]]; // expected-error {{expected an OpenMP directive}}
[[omp::directive(begin assumes)]]; // expected-error {{expected at least one 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism' clause for '#pragma omp begin assumes'}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes foobar)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes foobar)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(begin assumes foobar(foo 2 baz))]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(assumes foobar(foo 2 baz))]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes no_openmp(1))]]; // expected-warning {{'no_openmp' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(begin assumes no_openmp(1 2 3))]]; // expected-warning {{'no_openmp' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes no_openmp(1))]];
[[omp::directive(assumes foobar no_openmp bazbaz)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}} expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes foobar no_openmp bazbaz)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}} expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(begin assumes foobar(foo 2 baz) no_openmp bazbaz(foo 2 baz))]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(assumes foobar(foo 2 baz) no_openmp bazbaz(foo 2 baz))]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; tokens will be ignored}} expected-note {{the ignored tokens spans until here}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes no_openmp foobar no_openmp)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes no_openmp foobar no_openmp)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes holds(1, 2 3))]];
[[omp::directive(begin assumes holds(1, 2 3))]];
[[omp::directive(end assumes)]];
[[omp::directive(assumes absent(1, 2 3))]];
[[omp::directive(begin assumes absent(1, 2 3))]];
[[omp::directive(end assumes)]];
[[omp::directive(assumes contains(1, 2 3))]];
[[omp::directive(begin assumes contains(1, 2 3))]];
[[omp::directive(end assumes)]];
[[omp::directive(assumes ext)]]; // expected-warning {{valid assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(begin assumes ext)]]; // expected-warning {{valid begin assumes clauses start with 'ext_', 'absent', 'contains', 'holds', 'no_openmp', 'no_openmp_routines', 'no_parallelism'; token will be ignored}}
[[omp::directive(end assumes)]];
[[omp::directive(assumes ext_123(not allowed))]]; // expected-warning {{'ext_123' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(begin assumes ext_123(not allowed))]]; // expected-warning {{'ext_123' clause should not be followed by arguments; tokens will be ignored}} expected-note {{the ignored tokens spans until here}}
[[omp::directive(end assumes)]];
[[omp::directive(end assumes)]]; // expected-error {{'#pragma omp end assumes' with no matching '#pragma omp begin assumes'}}
// TODO: we should emit a warning at least.
[[omp::directive(begin assumes ext_abc)]];
|
GB_unaryop__abs_uint16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint16_uint8
// op(A') function: GB_tran__abs_uint16_uint8
// C type: uint16_t
// A type: uint8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GB_unop__abs_uint16_uint8: Cx = abs ((uint16_t) Ax), element-wise
//------------------------------------------------------------------------------
// Applies the unary operator to all anz entries of Ax in parallel.  For this
// unsigned instantiation GB_OP is the identity (z = x), so the work is just
// the uint8_t -> uint16_t cast.  Cx and Ax may be aliased because entry p of
// the output depends only on entry p of the input.
GrB_Info GB_unop__abs_uint16_uint8
(
    uint16_t *Cx,       // output array (may alias Ax)
    uint8_t *Ax,        // input array
    int64_t anz,        // number of entries in Ax and Cx
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // static schedule: each entry is the same amount of work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (uint16_t) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// GB_tran__abs_uint16_uint8: C = op (cast (A')) -- transpose + typecast + op
//------------------------------------------------------------------------------
// The whole algorithm lives in the shared template GB_unaryop_transpose.c,
// specialized here via the GB_* macros defined above (types, cast, operator).
// Only phase 2 is compiled in this translation unit (GB_PHASE_2_OF_2).
GrB_Info GB_tran__abs_uint16_uint8
(
    GrB_Matrix C,                           // output, already allocated
    const GrB_Matrix A,                     // input matrix (transposed logically)
    int64_t *GB_RESTRICT *Rowcounts,        // per-slice row counts workspace
    GBI_single_iterator Iter,               // iterator over A
    const int64_t *GB_RESTRICT A_slice,     // slice boundaries for parallelism
    int naslice                             // number of slices of A
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
structure_factors_direct.h | #ifndef CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
#include <cctbx/xray/scattering_type_registry.h>
#include <cctbx/xray/hr_ht_cache.h>
#include <cctbx/math/cos_sin_table.h>
#include <omptbx/omp_or_stubs.h>
//#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP
namespace cctbx { namespace xray { namespace structure_factors {
//! Accumulates the structure-factor contribution of one scatterer at a time,
//! summed over the symmetry-equivalent reflections of a single Miller index h.
/*! hr_ht caches h*R and h*t for each symmetry operation of the space group.
    Call add_contribution_of() once per scatterer, then f_calc() for the
    total (scaled by the lattice-translation factor).
 */
template <typename CosSinType, typename ScattererType>
struct direct_sum_over_equivalent_h
{
  typedef typename ScattererType::float_type float_type;
  typedef std::complex<float_type> complex_type;

  direct_sum_over_equivalent_h(
    CosSinType const& cos_sin_,
    sgtbx::space_group const& space_group_,
    miller::index<> h,
    float_type d_star_sq_)
  :
    cos_sin(cos_sin_),
    hr_ht(cos_sin_, space_group_, h),
    d_star_sq(d_star_sq_),
    sum_f_calc(0,0)
  {}

  //! Adds one scatterer's term; f0 is its form factor at this d_star_sq.
  void add_contribution_of(ScattererType const& scatterer,
                           float_type f0)
  {
    typedef float_type f_t;
    typedef complex_type c_t;
    c_t f_calc(0,0);
    // Sum exp(2*pi*i*(h*R*x + h*t)) over the cached symmetry operations.
    for(std::size_t i=0;i<hr_ht.groups.size();i++) {
      hr_ht_group<f_t> const& g = hr_ht.groups[i];
      f_t hrx = g.hr * scatterer.site;
      c_t term = cos_sin.get(hrx + g.ht);
      if (scatterer.flags.use_u_aniso()) {
        // anisotropic displacement: per-operation Debye-Waller factor
        f_t dw = adptbx::debye_waller_factor_u_star(g.hr, scatterer.u_star);
        term *= dw;
      }
      f_calc += term;
    }
    if (hr_ht.is_origin_centric) {
      // origin-centric: imaginary parts cancel pairwise
      f_calc = c_t(2*f_calc.real(),0);
    }
    else if (hr_ht.is_centric) {
      f_calc += std::conj(f_calc) * hr_ht.f_h_inv_t;
    }
    if (scatterer.flags.use_u_iso() && scatterer.u_iso != 0) {
      // isotropic displacement factor depends only on d_star_sq
      f_t dw=adptbx::debye_waller_factor_u_iso(d_star_sq/4, scatterer.u_iso);
      f_calc *= dw;
    }
    // Scale by occupancy-derived weight and (f0 + f') + i*f'' dispersion.
    f_t w = scatterer.weight();
    f_t f0p_w = (f0 + scatterer.fp) * w;
    f_t fdp_w = scatterer.fdp;
    if (fdp_w != 0) {
      fdp_w *= w;
      f_calc *= c_t(f0p_w, fdp_w);
    }
    else {
      f_calc *= f0p_w;
    }
    sum_f_calc += f_calc;
  }

  //! Total structure factor, scaled by the lattice-translation factor.
  complex_type f_calc() {
    return sum_f_calc * hr_ht.ltr_factor;
  }

  CosSinType const &cos_sin;          // cos/sin provider (exact or table)
  hr_ht_cache<float_type> hr_ht;      // cached h*R, h*t per symmetry op
  float_type d_star_sq;               // 1/d^2 for this reflection
  complex_type sum_f_calc;            // running sum over scatterers
};
//! Direct-summation structure-factor calculator.
/*! For every Miller index, sums the contributions of all scatterers over
    the symmetry-equivalent reflections (direct_sum_over_equivalent_h).
    The loop over Miller indices is OpenMP-parallel; f_calc() returns the
    resulting complex structure factors, one per input index.
 */
template <class ScattererType=scatterer<> >
class direct
{
  public:
    typedef ScattererType scatterer_type;
    typedef typename ScattererType::float_type float_type;

    direct() {}

    //! Computes immediately using exact cos/sin evaluation.
    direct(
      uctbx::unit_cell const& unit_cell,
      sgtbx::space_group const& space_group,
      af::const_ref<miller::index<> > const& miller_indices,
      af::const_ref<ScattererType> const& scatterers,
      xray::scattering_type_registry const& scattering_type_registry)
    {
      math::cos_sin_exact<float_type> cos_sin;
      compute(cos_sin, unit_cell, space_group, miller_indices,
              scatterers, scattering_type_registry);
    }

    //! Computes immediately using a caller-supplied cos/sin implementation
    //! (e.g. a lookup table for speed).
    template<class CosSinType>
    direct(
      CosSinType const& cos_sin,
      uctbx::unit_cell const& unit_cell,
      sgtbx::space_group const& space_group,
      af::const_ref<miller::index<> > const& miller_indices,
      af::const_ref<ScattererType> const& scatterers,
      xray::scattering_type_registry const& scattering_type_registry)
    {
      compute(cos_sin, unit_cell, space_group, miller_indices,
              scatterers, scattering_type_registry);
    }

    //! Computed structure factors, parallel to miller_indices.
    af::shared<std::complex<float_type> > const&
    f_calc() const { return f_calc_; }

  private:
    af::shared<std::complex<float_type> > f_calc_;

    template <typename CosSinType>
    void
    compute(
      CosSinType const& cos_sin,
      uctbx::unit_cell const& unit_cell,
      sgtbx::space_group const& space_group,
      af::const_ref<miller::index<> > const& miller_indices,
      af::const_ref<ScattererType> const& scatterers,
      xray::scattering_type_registry const& scattering_type_registry)
    {
      typedef float_type f_t;
      typedef std::complex<float_type> c_t;
      int n = static_cast<int>(miller_indices.size());
      f_calc_ = af::shared<c_t>(n, af::init_functor_null<c_t>());
      c_t *f_calc_beg = f_calc_.begin();
      af::shared<std::size_t> scattering_type_indices
        = scattering_type_registry.unique_indices(scatterers);
      /* The OpenMP standard specifies that
           A throw executed inside a parallel region must cause execution
           to resume within the same parallel region, and it must be caught
           by the same thread that threw the exception.
         Since a std::runtime_error may be thrown during Debye-Waller
         computations (c.f. adptbx.h, function debye_waller_factor_exp)
         one must make sure it cannot escape the body of the parallelised
         loop. So we catch it inside the loop and then re-throw it
         immediately after the loop finished.
       */
      boost::optional<std::runtime_error> error;
#if !defined(CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP)
#if !defined(__DECCXX_VER) || (defined(_OPENMP) && _OPENMP > 199819)
      #pragma omp parallel for schedule(static)
#endif
#endif
      for(int i=0;i<n;i++) {
        try {
          miller::index<> h = miller_indices[i];
          f_t d_star_sq = unit_cell.d_star_sq(h);
          af::shared<double> form_factors
            = scattering_type_registry.unique_form_factors_at_d_star_sq(
                d_star_sq);
          direct_sum_over_equivalent_h<CosSinType, ScattererType>
            sum(cos_sin, space_group, h, d_star_sq);
          for(std::size_t j=0; j<scatterers.size(); ++j) {
            sum.add_contribution_of(scatterers[j],
                                    form_factors[scattering_type_indices[j]]);
          }
          f_calc_beg[i] = sum.f_calc();
        }
        // Catch by const reference: catching by value copies the exception
        // and would slice any derived exception type down to runtime_error.
        catch (std::runtime_error const& e) {
          #pragma omp critical
          {
            // The first error will be recorded only.
            if (!error) error = e;
          }
        }
      }
      if (error) throw *error;
    }
};
}}} // namespace cctbx::xray::structure_factors
#endif // CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
|
Matrix_Add_ColumnMajor.c | #include<stdio.h>
#include<stdlib.h>
#include <sys/time.h>
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
#define size 10000
#define NT 8
int A[size][size];
int B[size][size];
int C[size][size];
int flag[size];//to set flag[i]==1 if arr[i] is maximum
/* Benchmark: element-wise matrix addition traversed in column-major order.
 * Usage: ./a.out <seed>.  Prints the elapsed time of the parallel loop in
 * milliseconds.  A, B, C and size/NT are file-scope. */
int main(int argc, char *argv[]){
    if(argc!=2){
        printf("Usage path-to-executable seedvalue (example usage: ./a.out 3)\n");
        exit(0);
    }
    srand(atoi(argv[1]));//Seed for random number command line integer value
    // Fill A and B with pseudo-random values in [0, 1048575].
    for(int i=0;i<size;i++){
        for(int j=0;j<size;j++){
            A[i][j]=rand()%1048576;
            B[i][j]=rand()%1048576;
        }
    }
    double t1=rtclock();
    // Use the NT macro instead of a hard-coded 8 so changing NT actually
    // changes the thread count.  Indexing C[j][i] with inner loop j walks
    // down columns (column-major traversal), which is the point of the test.
    #pragma omp parallel for num_threads(NT)
    for(int i=0;i<size;i++)
        for(int j=0;j<size;j++)
            C[j][i]=A[j][i]+B[j][i];
    double t2=rtclock();
    printf("\nTIME =%f \n",(t2-t1)*1000);
    return 0;
}
/*Run executable-path <integer-seed-value>
*example:
./a.out 3 */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * NOTE: y is normalized in place as part of the carry handling, so the
 * caller's y may be modified.  Returns 1 if the difference is negative,
 * otherwise 0 (the classic GNU libc example algorithm). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Push any excess microseconds back into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec of the result is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Order-1 3D 7-point stencil benchmark.
 * Usage: ./a.out Nx Ny Nz [Nt].  Runs TESTS timed sweeps and reports the
 * per-test time; optional LIKWID instrumentation around the kernel. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    // Sane defaults so the sizes are never read uninitialized (previously
    // running with fewer than 4/5 arguments was undefined behavior).
    // The +2 accounts for the one-cell halo on each side.
    int Nx = 32 + 2, Ny = 32 + 2, Nz = 32 + 2, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // Two time buffers (A[0], A[1]) of Nz x Ny x Nx, allocated row by row.
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 32;
    tile_size[1] = 32;
    tile_size[2] = 4;
    tile_size[3] = 32;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    // initialize the input buffer with reproducible pseudo-random values
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        // Jacobi-style update: ping-pong between the two buffers A[t%2]
        // and A[(t+1)%2]; each point mixes itself (alpha) with its six
        // face neighbors (beta).
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        // Bug fix: the macro defined above is MIN, not min; `min` was an
        // implicit (undeclared) function call.
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "opencl.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Read a file of newline-separated paths into a list (one string per line).
 * Any trailing CR/LF embedded in the filename argument itself is stripped
 * in place before opening. */
list *get_paths(char *filename)
{
    if (filename) filename[strcspn(filename, "\n\r")] = 0;
    char *eol;
    if ((eol = strchr(filename, '\r')) != NULL) *eol = '\0';
    if ((eol = strchr(filename, '\n')) != NULL) *eol = '\0';
    FILE *fp = fopen(filename, "r");
    if (!fp) file_error(filename);
    list *lines = make_list();
    char *path;
    while ((path = fgetl(fp)) != NULL) {
        list_insert(lines, path);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
//if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Sample n paths uniformly at random (with replacement) from paths[0..m-1].
 * rand() is serialized behind the file-scope mutex.  The caller frees the
 * returned array but not the strings, which are shared with the input. */
char **get_random_paths(char **paths, int n, int m)
{
    char **picked = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for (i = 0; i < n; ++i) {
        int idx = rand() % m;
        picked[i] = paths[idx];
    }
    pthread_mutex_unlock(&mutex);
    return picked;
}
/* Return a new array of n paths with `find` replaced by `replace` in each.
 * Every result is a fresh heap copy; the caller owns array and strings. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = calloc(n, sizeof(char*));
    int i;
    for (i = 0; i < n; ++i) {
        char buf[4096];
        find_replace(paths[i], find, replace, buf);
        out[i] = copy_string(buf);
    }
    return out;
}
/* Load n images at w x h, convert each to grayscale, and pack them into a
 * matrix with one flattened image per row.  Rows take ownership of the
 * image data buffers. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);   // load as 3 channels first
        image gray = grayscale_image(im);
        free_image(im);
        im = gray;
        X.vals[i] = im.data;                // row now owns the pixel buffer
        X.cols = im.h*im.w*im.c;            // same for every image; last wins
    }
    return X;
}
/* Load n color images at w x h into a matrix, one flattened image per row.
 * Rows take ownership of the image data buffers. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    int i;
    for (i = 0; i < n; ++i) {
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h * im.w * im.c;
    }
    return X;
}
/* Load n images and apply augmentation: either a deterministic center crop
 * (center != 0) or a random rotate/scale/aspect crop, then a random
 * horizontal flip and HSV distortion.  One flattened image per matrix row;
 * rows own the pixel buffers. */
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);   // native resolution
        image crop;
        if(center){
            crop = center_crop_image(im, size, size);
        } else {
            crop = random_augment_image(im, angle, aspect, min, max, size, size);
        }
        int flip = rand()%2;                  // 50% chance of horizontal flip
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);
        /*
        show_image(im, "orig");
        show_image(crop, "crop");
        cvWaitKey(0);
        */
        //grayscale_image_3c(crop);
        free_image(im);
        X.vals[i] = crop.data;
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}
box_label *read_boxes(char *filename, int *n)
{
//if (filename) filename[strcspn(filename, "\n\r")] = 0;
char *pos;
if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0';
if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0';
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
/* Shuffle n box labels in place by swapping each element with a uniformly
 * random one.  (Note: rand()%n over the full range, so this is not an
 * unbiased Fisher-Yates shuffle; kept for behavioral compatibility.) */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        int j = rand() % n;     /* may equal i; the swap is then a no-op */
        box_label tmp = b[i];
        b[i] = b[j];
        b[j] = tmp;
    }
}
/* Map box labels into the coordinate frame of a crop: scale the edges by
 * (sx, sy), shift by (dx, dy), optionally mirror horizontally, clamp edges
 * to [0,1], then rebuild center (x,y) and size (w,h) from the clamped
 * edges.  Boxes at exactly (0,0) are treated as empty padding entries and
 * pushed far out of range so they never match. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            // sentinel/empty label: move it out of the unit square
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;
        if(flip){
            // horizontal mirror: left and right edges swap around x = 0.5
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }
        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);
        // rebuild center/size from the clamped edges
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);
        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/* Fill detection truth for the "swag" layout: up to 90 boxes, each encoded
 * as [x, y, w, h, one-hot class] in a stride of (4+classes) floats.  The
 * label file path is derived from the image path by substring replacement. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count && i < 90; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        // NOTE(review): this filters only negative sizes; sibling loaders
        // use small positive thresholds (.001/.005) -- confirm intentional.
        if (w < .0 || h < .0) continue;
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}
/* Fill region-layer truth on a num_boxes x num_boxes grid.  Each box is
 * assigned to the grid cell containing its center; the cell stores
 * [objectness=1, one-hot class, x, y, w, h] in a stride of (5+classes),
 * with (x, y) relative to the cell.  Only the first box per cell is kept. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        if (w < .005 || h < .005) continue;    // drop degenerate boxes
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        // make (x, y) relative to the assigned cell
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;            // cell already occupied
        truth[index++] = 1;                    // objectness
        if (id < classes) truth[index+id] = 1; // one-hot class
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/* Decode a run-length encoding into im.data: the n run lengths alternate
 * values 0,1,0,1,... starting at 0; any remaining pixels after the last run
 * are filled with the current value. */
void load_rle(image im, int *rle, int n)
{
    int filled = 0;
    int value = 0;
    int i, j;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < rle[i]; ++j) {
            im.data[filled++] = value;
        }
        value = 1 - value;      /* toggle between 0 and 1 */
    }
    while (filled < im.h * im.w * im.c) {
        im.data[filled++] = value;
    }
}
/* OR the single-channel src into channel c of dest: every nonzero source
 * pixel sets the corresponding dest pixel to 1 (existing 1s are kept). */
void or_image(image src, image dest, int c)
{
    int npix = src.w * src.h;
    int i;
    for (i = 0; i < npix; ++i) {
        if (src.data[i]) dest.data[dest.w * dest.h * c + i] = 1;
    }
}
/* Make the channels of src mutually exclusive per pixel: if a pixel is set
 * in channel k, clear it in every later channel (earlier channels win). */
void exclusive_image(image src)
{
    int s = src.w * src.h;
    int i, j, k;
    for (k = 0; k < src.c - 1; ++k) {
        for (i = 0; i < s; ++i) {
            if (!src.data[k*s + i]) continue;
            for (j = k + 1; j < src.c; ++j) {
                src.data[j*s + i] = 0;
            }
        }
    }
}
/* Tight bounding box of the nonzero pixels in the first channel of im, as
 * {x, y, w, h} with inclusive pixel extents.  NOTE(review): if the image
 * has no set pixels the box degenerates (w, h can be <= 0) -- callers
 * check b.w > 0 before using it. */
box bound_image(image im)
{
    int minx = im.w;
    int miny = im.h;
    int maxx = 0;
    int maxy = 0;
    int x, y;
    for (y = 0; y < im.h; ++y) {
        for (x = 0; x < im.w; ++x) {
            if (!im.data[y*im.w + x]) continue;
            if (x < minx) minx = x;
            if (y < miny) miny = y;
            if (x > maxx) maxx = x;
            if (y > maxy) maxy = y;
        }
    }
    box b = {minx, miny, maxx - minx + 1, maxy - miny + 1};
    //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
    return b;
}
/* Fill instance-segmentation truth: for up to num_boxes mask instances read
 * from the RLE mask file of `path`, store [class id, mw*mh mask values] per
 * instance in a stride of (mw*mh + 1).  Each mask is decoded at full
 * resolution, cropped/rotated with the same augmentation as the image,
 * optionally flipped, then resized to mw x mh.  A -1 id terminates the list
 * when fewer than num_boxes instances exist. */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    int j;
    image part = make_image(w, h, 1);   // scratch buffer, reused per instance
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        // apply the same geometric augmentation as the image
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        image mask = resize_image(sized, mw, mh);
        truth[i*(mw*mh+1)] = id;
        for(j = 0; j < mw*mh; ++j){
            truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
        }
        ++i;
        free_image(mask);
        free_image(sized);
        free(rle);
    }
    if(i < num_boxes) truth[i*(mw*mh+1)] = -1;  // terminator
    fclose(file);
    free_image(part);
}
/* Fill mask-head truth: for up to num_boxes RLE instances, store
 * [bbox x, y, w, h (relative), mw*mh cropped mask, class id] per instance
 * in a stride of (4 + mw*mh + 1).  Each mask is augmented like the image,
 * bounded, cropped to its bounding box, and resized to mw x mh.  Instances
 * whose augmented mask is empty (b.w <= 0) are skipped. */
void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    image part = make_image(w, h, 1);   // scratch buffer, reused per instance
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        box b = bound_image(sized);
        if(b.w > 0){                    // skip masks emptied by augmentation
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            // bbox center/size, normalized by the augmented image size
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}
/* Fill flat detection truth: up to num_boxes boxes, each encoded as
 * [x, y, w, h, id] in a stride of 5 floats.  Boxes are shuffled, mapped to
 * the crop frame, and degenerate ones (w or h < .001) are compacted out. */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;
    int sub = 0;    // number of skipped (degenerate) boxes so far
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        if ((w < .001 || h < .001)) {
            ++sub;          // compact the output: later boxes shift down
            continue;
        }
        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}
#define NUMCHARS 37
/* Decode and print n characters: each consecutive group of NUMCHARS scores
 * is argmax-decoded to an alphanumeric character; ends with a newline. */
void print_letters(float *pred, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        int best = max_index(pred + i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/* Fill one-hot captcha truth from the basename of `path` (characters up to
 * the first '.').  Each of the n slots has NUMCHARS classes; slots beyond
 * the label length are set to class NUMCHARS-1 (apparently a padding/blank
 * class -- TODO confirm against the decoder). */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    // Hoisted out of the loop condition: the original called strlen(begin)
    // on every iteration even though the string does not change.
    int len = (int)strlen(begin);
    int i;
    for(i = 0; i < len && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}
/* Build a captcha classification batch: n images at w x h with one-hot
 * labels for k characters (NUMCHARS classes each).  With m > 0, n paths are
 * sampled at random from the pool of m and the temporary array is freed. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;      // d owns its buffers
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        // label comes from the file's basename
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}
/* Build an autoencoder-style captcha batch: the targets alias the inputs
 * (d.y = d.X shares the same row pointers -- do not deep-free both).
 * NOTE(review): X.cols is overridden to the magic value 17100, presumably
 * the network's input size -- confirm against the model configuration. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;          // shallow alias: same row buffers as X
    if(m) free(paths);
    return d;
}
/* One-hot truth from a path: truth[i] = 1 iff labels[i] occurs as a
 * substring of path.  Warns unless exactly one label matches (for the
 * single-class case k == 1, zero matches is also tolerated). */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k*sizeof(float));
    int hits = 0;
    int i;
    for (i = 0; i < k; ++i) {
        if (strstr(path, labels[i])) {
            truth[i] = 1;
            ++hits;
            //printf("%s %s %d\n", path, labels[i], i);
        }
    }
    if (hits != 1 && (k != 1 || hits != 0)) printf("Too many or too few labels: %d, %s\n", hits, path);
}
/* Adjust one-hot truth for a label hierarchy (tree).  First, every ancestor
 * of a set leaf is also set.  Then each sibling group that contains no set
 * member is filled with SECRET_NUM -- NOTE(review): presumably a sentinel
 * the loss uses to ignore those entries; confirm against the loss code. */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    // propagate truth upward: mark all ancestors of set labels
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;      // start index of the current sibling group
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;   // 1 while no member of this group is set
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            // no member set: fill the whole group with the sentinel
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/* Load k regression targets per path from the sibling "labels" .txt file
 * (image path mapped by substring replacement, any common image extension
 * becoming .txt).  Rows whose label file is missing or short keep the
 * values make_matrix initialized them with. */
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".BMP", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        find_replace(labelpath, ".JPG", ".txt", labelpath);
        find_replace(labelpath, ".JPeG", ".txt", labelpath);
        find_replace(labelpath, ".Jpeg", ".txt", labelpath);
        find_replace(labelpath, ".PNG", ".txt", labelpath);
        find_replace(labelpath, ".TIF", ".txt", labelpath);
        find_replace(labelpath, ".bmp", ".txt", labelpath);
        find_replace(labelpath, ".jpeg", ".txt", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);
        find_replace(labelpath, ".tif", ".txt", labelpath);
        FILE *file = fopen(labelpath, "r");
        if(!file){
            // Previously a missing label file crashed in fscanf(NULL, ...).
            fprintf(stderr, "Couldn't open label file %s\n", labelpath);
            continue;
        }
        for(j = 0; j < k; ++j){
            // stop at EOF / malformed data instead of ignoring the result
            if(fscanf(file, "%f", &(y.vals[i][j])) != 1) break;
        }
        fclose(file);
    }
    return y;
}
/* Truth matrix for n paths: substring-match each path against the label
 * names, then optionally propagate/mask with the label hierarchy.  With a
 * NULL labels array the matrix is returned untouched. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int i;
    for (i = 0; i < n && labels; ++i) {
        fill_truth(paths[i], labels, k, y.vals[i]);
        if (hierarchy) {
            fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}
/* Multi-label truth matrix: each path's sibling "labels" .txt file lists
 * integer tag ids, and every id < k sets that column to 1.  Paths without
 * a tag file simply keep an all-zero row. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    //int count = 0;
    for (i = 0; i < n; ++i) {
        char label[4096];
        find_replace(paths[i], "images", "labels", label);
        find_replace(label, ".jpg", ".txt", label);
        FILE *fp = fopen(label, "r");
        if (!fp) continue;      /* no tag file: leave the row all zero */
        //++count;
        int tag;
        while (fscanf(fp, "%d", &tag) == 1) {
            if (tag < k) {
                y.vals[i][tag] = 1;
            }
        }
        fclose(fp);
    }
    //printf("%d/%d\n", count, n);
    return y;
}
/* Read label names, one per line, into a heap array of strings.
 * NOTE(review): free_list presumably frees only the list nodes, leaving
 * the strings owned by the returned array -- verify in list.c. */
char **get_labels(char *filename)
{
    list *path_list = get_paths(filename);
    char **labels = (char **)list_to_array(path_list);
    free_list(path_list);
    return labels;
}
/* Release a data bundle.  Deep data owns its row buffers, so the matrices
 * are freed fully; shallow data shares its rows with another owner, so
 * only the row-pointer arrays are released. */
void free_data(data d)
{
    if (d.shallow) {
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Build a w x h x classes segmentation mask for `path` from its RLE mask
 * file (path derived via images->mask and .jpg->.txt substitutions).  Each
 * file line is "<class-id> <rle-counts>"; instances of the same class are
 * OR-merged into that class channel. */
image get_segmentation_image(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);   // scratch buffer, reused per instance
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);       // merge instance into class channel
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}
/* Like get_segmentation_image, but with an extra trailing channel (index
 * `classes`) that starts all 1 and is cleared wherever any class covers the
 * pixel -- i.e. an explicit background channel. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int i;
    // background channel starts fully set
    for(i = 0; i < w*h; ++i){
        mask.data[w*h*classes + i] = 1;
    }
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);   // scratch buffer, reused per instance
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);       // merge instance into class channel
        // clear background wherever this instance covers the pixel
        for(i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[w*h*classes + i] = 0;
        }
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}
/* Build a semantic-segmentation batch of n random samples: X holds
 * augmented w x h x 3 images, y holds the matching class masks downscaled
 * by `div`.  The identical geometric augmentation (rotate/crop/flip) is
 * applied to image and mask so they stay aligned; photometric distortion
 * is applied to the image only. */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;      // d owns its buffers

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y.rows = n;
    d.y.cols = h*w*classes/div/div;     // mask downscaled by div per axis
    d.y.vals = calloc(d.X.rows, sizeof(float*));

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        // same augmentation, scaled down by div, keeps mask aligned to image
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);

        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;

        free_image(orig);
        free_image(mask);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/* Build an instance-segmentation batch of n random samples: X holds
 * augmented w x h x 3 images, y holds per-instance truth produced by
 * fill_truth_iseg (class id + downscaled mask per instance, up to `boxes`
 * instances).  The augmentation parameters are shared so image and masks
 * stay aligned. */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;      // d owns its buffers

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    // one row per sample: boxes * (downscaled mask + 1 id slot)
    d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        // masks get the same geometric augmentation via `a` and `flip`
        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);

        free_image(orig);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/* Build a mask-head batch of n random samples: X holds augmented w x h x 3
 * images, y holds per-instance truth from fill_truth_mask (bbox + 14x14
 * mask + id per instance, `coords` floats plus the id slot per box).  The
 * augmentation parameters are shared so image and masks stay aligned. */
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;      // d owns its buffers

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (coords+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        // masks are resized to a fixed 14 x 14 grid per instance
        fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);

        free_image(orig);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/* Build a region-layer detection batch of n random samples.  Each image is
 * randomly jitter-cropped (edges moved by up to +/- jitter of the original
 * size), resized to w x h, flipped half the time, and color-distorted; the
 * boxes are mapped through the same crop into a size x size grid truth
 * (fill_truth_region). */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;      // d owns its buffers

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);      // grid cells x per-cell truth stride
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        // random jitter of each crop edge, up to +/- jitter fraction
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        // crop size relative to the original (box scale factors)
        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        // crop offset relative to the crop (box shift factors)
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/* Load n image PAIRS for comparison training.
 * Each row of X holds two stacked RGB images (6 channels); each truth row
 * holds, per class, the best IoU seen for image 1 and image 2. After both
 * label files are read, classes where one image clearly wins (>.5 vs <.5)
 * are converted to hard 1/0 targets; ambiguous classes get SECRET_NUM.
 *
 * Fix: the original never checked the fopen() results, so a missing label
 * file crashed inside fscanf(). Each file is now checked with file_error()
 * (consistent with the other loaders) and closed right after its loop. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        /* Stack both images channel-wise into one row. */
        d.X.vals[i] = calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        /* Per-class max IoU for the first image -> even truth slots. */
        char imlabel1[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if(!fp1) file_error(imlabel1);
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        fclose(fp1);

        /* Per-class max IoU for the second image -> odd truth slots. */
        char imlabel2[4096];
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        fclose(fp2);

        /* Harden the targets where there is a clear winner. */
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load ONE randomly chosen image, at its native resolution, with up to 90
 * ground-truth boxes for SWAG-style training. A jitter crop and optional
 * flip are applied; the crop is then resized back to the ORIGINAL (w, h),
 * so the net sees a warped view of the full frame.
 * Returned data is deep (d.shallow = 0). */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = rand()%n;
char *random_path = paths[index];
image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
d.X.rows = 1;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
/* truth: up to 90 boxes, each with 4 coords + classes one-hot */
int k = (4+classes)*90;
d.y = make_matrix(1, k);
int dw = w*jitter;
int dh = h*jitter;
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = w - pleft - pright;
int sheight = h - ptop - pbot;
float sx = (float)swidth / w;
float sy = (float)sheight / h;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data; /* d takes ownership of the pixels */
fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
return d;
}
/* Load n randomly chosen images with box detection truth (YOLO style).
 * Instead of cropping, the image is PLACED into a gray (.5) canvas at a
 * random position with a jittered aspect ratio, then HSV-distorted and
 * optionally flipped. The placement offsets/scales are forwarded to
 * fill_truth_detection to remap the boxes. Deep data (d.shallow = 0). */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
/* truth: boxes rows of (x, y, w, h, class id) */
d.y = make_matrix(n, 5*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
image sized = make_image(w, h, orig.c);
fill_image(sized, .5); /* neutral gray letterbox background */
float dw = jitter * orig.w;
float dh = jitter * orig.h;
/* jitter the aspect ratio around the original */
float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
//float scale = rand_uniform(.25, 2);
float scale = 1;
float nw, nh;
if(new_ar < 1){
nh = scale * h;
nw = nh * new_ar;
} else {
nw = scale * w;
nh = nw / new_ar;
}
/* random placement of the resized image inside the canvas */
float dx = rand_uniform(0, w - nw);
float dy = rand_uniform(0, h - nh);
place_image(orig, nw, nh, dx, dy, sized);
random_distort_image(sized, hue, saturation, exposure);
int flip = rand()%2;
if(flip) flip_image(sized);
d.X.vals[i] = sized.data; /* d takes ownership of the pixels */
fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
free_image(orig);
}
free(random_paths);
return d;
}
/* pthread entry point: dispatch one load request to the loader matching
 * a.type, writing the result through a.d (or a.im/a.resized for the image
 * types). Takes ownership of ptr (a heap-allocated load_args) and frees it.
 * Zero-valued distortion args are normalized to the identity (1). */
void *load_thread(void *ptr)
{
//printf("Loading data: %d\n", rand());
load_args a = *(struct load_args*)ptr;
/* 0 means "unset": default to no-op exposure/saturation/aspect */
if(a.exposure == 0) a.exposure = 1;
if(a.saturation == 0) a.saturation = 1;
if(a.aspect == 0) a.aspect = 1;
if (a.type == OLD_CLASSIFICATION_DATA){
*a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
} else if (a.type == REGRESSION_DATA){
*a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == CLASSIFICATION_DATA){
*a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
} else if (a.type == SUPER_DATA){
*a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
} else if (a.type == WRITING_DATA){
*a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
} else if (a.type == ISEG_DATA){
*a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == INSTANCE_DATA){
*a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == SEGMENTATION_DATA){
*a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
} else if (a.type == REGION_DATA){
*a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == DETECTION_DATA){
*a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == SWAG_DATA){
*a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
} else if (a.type == COMPARE_DATA){
*a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
} else if (a.type == IMAGE_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = resize_image(*(a.im), a.w, a.h);
} else if (a.type == LETTERBOX_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = letterbox_image(*(a.im), a.w, a.h);
} else if (a.type == TAG_DATA){
*a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
}
free(ptr); /* this thread owns the args copy */
return 0;
}
/* Spawn a single loader thread running load_thread on a heap copy of args
 * (the thread frees the copy). Returns the thread handle; the caller must
 * pthread_join it before reading *args.d. */
pthread_t load_data_in_thread(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
return thread;
}
/* pthread entry point: split one load request of n items across
 * args.threads worker threads, then concatenate the per-thread results
 * into *args.d. Takes ownership of ptr and frees it. */
void *load_threads(void *ptr)
{
int i;
load_args args = *(load_args *)ptr;
if (args.threads == 0) args.threads = 1;
data *out = args.d;
int total = args.n;
free(ptr);
data *buffers = calloc(args.threads, sizeof(data));
pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
for(i = 0; i < args.threads; ++i){
args.d = buffers + i;
/* partition total items so the per-thread counts sum exactly to total */
args.n = (i+1) * total/args.threads - i * total/args.threads;
threads[i] = load_data_in_thread(args);
}
for(i = 0; i < args.threads; ++i){
pthread_join(threads[i], 0);
}
*out = concat_datas(buffers, args.threads);
out->shallow = 0; /* result owns the row pointers gathered from the buffers */
for(i = 0; i < args.threads; ++i){
/* rows were handed to *out; free only the per-buffer containers */
buffers[i].shallow = 1;
free_data(buffers[i]);
}
free(buffers);
free(threads);
return 0;
}
/* Run one load request synchronously on the calling thread.
 * The heap copy of args is required because load_thread frees its argument. */
void load_data_blocking(load_args args)
{
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
load_thread(ptr);
}
/* Spawn the multi-threaded loader (load_threads) asynchronously.
 * Returns the coordinator thread handle; join it before using *args.d. */
pthread_t load_data(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_threads, ptr)) {
/* NOTE(review): assumes error() does not return; if it can return,
 * ptr leaks here and 0 is not a portable pthread_t value -- confirm. */
error("Thread creation failed");
return 0;
}
return thread;
}
/* Load n image/label pairs for pixel-writing tasks: X holds the RGB images
 * resized to (w, h); y holds grayscale "<name>-label.png" targets resized
 * to (out_w, out_h). If m > 0, n paths are sampled at random from m. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
if(m) paths = get_random_paths(paths, n, m);
char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
if(m) free(paths);
int i;
for(i = 0; i < n; ++i) free(replace_paths[i]);
free(replace_paths);
return d;
}
/* Load n images and their k-way class labels with no augmentation.
 * When m > 0, n paths are sampled at random from the m available and the
 * temporary path array is released before returning.
 * Returned data is deep (d.shallow = 0). */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;

    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(selected, n, w, h);
    d.y = load_labels_paths(selected, n, labels, k, 0);

    if(m) free(selected);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/* Load n image pairs for super-resolution: y holds a random (w*scale,
 * h*scale) crop of the original and X holds the same crop downsampled to
 * (w, h). Crops are randomly flipped. Deep data (d.shallow = 0). */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
int i;
d.X.rows = n;
d.X.vals = calloc(n, sizeof(float*));
d.X.cols = w*h*3;
d.y.rows = n;
d.y.vals = calloc(n, sizeof(float*));
d.y.cols = w*scale * h*scale * 3;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop = random_crop_image(im, w*scale, h*scale);
int flip = rand()%2;
if (flip) flip_image(crop);
image resize = resize_image(crop, w, h);
d.X.vals[i] = resize.data; /* low-res input: d owns the pixels */
d.y.vals[i] = crop.data; /* high-res target: d owns the pixels */
free_image(im);
}
if(m) free(paths);
return d;
}
/* Load n augmented images with k real-valued regression targets per image.
 * When m > 0, n paths are sampled at random from the m available.
 * Returned data is deep (d.shallow = 0). */
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;

    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(selected, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(selected, n, k);

    if(m) free(selected);
    return d;
}
/* Build a shallow dataset whose row i is row i of dataset orig[inds[i]]:
 * inds selects, per row, which of the candidate datasets to take that row
 * from. Row pointers are aliased (d.shallow = 1), so the source datasets
 * must outlive the result; only the vals arrays here need freeing. */
data select_data(data *orig, int *inds)
{
data d = {0};
d.shallow = 1;
d.w = orig[0].w;
d.h = orig[0].h;
d.X.rows = orig[0].X.rows;
/* NOTE(review): y.rows is taken from X.rows -- assumes every dataset has
 * y.rows == X.rows; confirm for callers with mismatched truth shapes. */
d.y.rows = orig[0].X.rows;
d.X.cols = orig[0].X.cols;
d.y.cols = orig[0].y.cols;
d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
int i;
for(i = 0; i < d.X.rows; ++i){
d.X.vals[i] = orig[inds[i]].X.vals[i];
d.y.vals[i] = orig[inds[i]].y.vals[i];
}
return d;
}
/* Split each image of orig into a divs x divs grid of overlapping tiles,
 * each scaled to (orig.w/divs * size, orig.h/divs * size). Returns a
 * heap array of divs*divs datasets (tile i covers grid cell (i%divs,
 * i/divs) of every image); each tile copies orig's truth matrix. */
data *tile_data(data orig, int divs, int size)
{
data *ds = calloc(divs*divs, sizeof(data));
int i, j;
#pragma omp parallel for
for(i = 0; i < divs*divs; ++i){
data d;
d.shallow = 0;
d.w = orig.w/divs * size;
d.h = orig.h/divs * size;
d.X.rows = orig.X.rows;
d.X.cols = d.w*d.h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
/* NOTE(review): this inner "parallel for" nests inside the outer one;
 * with nested parallelism disabled (the usual default) it runs serially
 * per outer thread -- confirm that is the intent. */
#pragma omp parallel for
for(j = 0; j < orig.X.rows; ++j){
/* center the (possibly larger) tile on its grid cell */
int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
}
ds[i] = d;
}
return ds;
}
/* Return a deep copy of orig with every image resized to (w, h).
 * Truth rows are copied unchanged; image rows are resized in parallel. */
data resize_data(data orig, int w, int h)
{
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
int i;
d.X.rows = orig.X.rows;
d.X.cols = w*h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(i = 0; i < orig.X.rows; ++i){
/* wrap the raw row as an image header (no copy), then resize */
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
d.X.vals[i] = resize_image(im, w, h).data;
}
return d;
}
/* Load n augmented images with (possibly hierarchical) classification
 * labels. When m > 0, n paths are sampled at random from the m available.
 * Returned data is deep (d.shallow = 0) and records the crop size. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;

    data d = {0};
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(selected, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(selected, n, labels, k, hierarchy);

    if(m) free(selected);
    return d;
}
/* Load n augmented images with k-way multi-label tag targets.
 * When m > 0, n paths are sampled at random from the m available.
 * Returned data is deep (d.shallow = 0) and records the crop size. */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **selected = m ? get_random_paths(paths, n, m) : paths;

    data d = {0};
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(selected, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(selected, n, k);

    if(m) free(selected);
    return d;
}
/* Vertically stack m1 on top of m2.
 * Only the row-pointer array is new: the rows themselves are aliased from
 * m1 and m2 (shallow concatenation), so the sources must stay alive.
 * Assumes m1.cols == m2.cols (the result reports m1.cols). */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows + m2.rows;
    m.vals = calloc(m.rows, sizeof(float*));
    memcpy(m.vals,           m1.vals, m1.rows * sizeof(float*));
    memcpy(m.vals + m1.rows, m2.vals, m2.rows * sizeof(float*));
    return m;
}
/* Stack two datasets: rows of d2 follow rows of d1.
 * The result is shallow (aliases the row pointers of d1 and d2), so both
 * inputs must outlive it. Dimensions are taken from d1. */
data concat_data(data d1, data d2)
{
    data joined = {0};
    joined.shallow = 1;
    joined.w = d1.w;
    joined.h = d1.h;
    joined.X = concat_matrix(d1.X, d2.X);
    joined.y = concat_matrix(d1.y, d2.y);
    return joined;
}
/* Concatenate n datasets into one. Each step builds a fresh shallow
 * container over the accumulated rows and releases the previous one, so
 * the final result aliases rows owned by the inputs. */
data concat_datas(data *d, int n)
{
    data accum = {0};
    int i;
    for(i = 0; i < n; ++i){
        data next = concat_data(d[i], accum);
        free_data(accum); /* drop the old container; its rows live on in next */
        accum = next;
    }
    return accum;
}
/* Load a CSV file as a dataset: column `target` is removed from X and
 * one-hot encoded into a k-class truth matrix y. Deep data (d.shallow = 0). */
data load_categorical_data_csv(char *filename, int target, int k)
{
data d = {0};
d.shallow = 0;
matrix X = csv_to_matrix(filename);
float *truth_1d = pop_column(&X, target);
float **truth = one_hot_encode(truth_1d, X.rows, k);
matrix y;
y.rows = X.rows;
y.cols = k;
y.vals = truth; /* y takes ownership of the one-hot rows */
d.X = X;
d.y = y;
free(truth_1d);
return d;
}
/* Load one CIFAR-10 binary batch file (10000 records of 1 label byte +
 * 3072 pixel bytes) into a dataset with one-hot labels and pixels scaled
 * to [0, 1]. Deep data (d.shallow = 0).
 *
 * Fix: the fread() return value was ignored, so a truncated or corrupt
 * batch file silently produced garbage rows; a short read now aborts via
 * file_error(), matching the fopen failure path. */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;

    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename); /* truncated batch */
        int class = bytes[0];
        y.vals[i][class] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255); /* bytes -> [0, 1] */
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/* Fill X and y with n rows drawn uniformly at random (with replacement)
 * from dataset d. X must hold n*d.X.cols floats and y n*d.y.cols floats. */
void get_random_batch(data d, int n, float *X, float *y)
{
    size_t xbytes = d.X.cols * sizeof(float);
    size_t ybytes = d.y.cols * sizeof(float);
    int i;
    for(i = 0; i < n; ++i){
        int row = rand() % d.X.rows;
        memcpy(X + i*d.X.cols, d.X.vals[row], xbytes);
        memcpy(y + i*d.y.cols, d.y.vals[row], ybytes);
    }
}
/* Copy rows [offset, offset+n) of d into X and, when y is non-NULL, the
 * matching truth rows into y. No bounds checking: the caller guarantees
 * offset + n <= d.X.rows. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int i;
    for(i = 0; i < n; ++i){
        int row = offset + i;
        memcpy(X + i*d.X.cols, d.X.vals[row], d.X.cols * sizeof(float));
        if(y) memcpy(y + i*d.y.cols, d.y.vals[row], d.y.cols * sizeof(float));
    }
}
/* Label smoothing, in place: y <- eps/k + (1-eps)*y with eps = 0.1 and
 * k = number of classes (d.y.cols). Softens one-hot targets. */
void smooth_data(data d)
{
    const float eps = .1;
    float scale = 1. / d.y.cols;
    int row, col;
    for(row = 0; row < d.y.rows; ++row){
        for(col = 0; col < d.y.cols; ++col){
            d.y.vals[row][col] = eps * scale + (1-eps) * d.y.vals[row][col];
        }
    }
}
/* Load all five CIFAR-10 training batches (50000 records) from
 * data/cifar/cifar-10-batches-bin/, scale pixels to [0, 1], and apply
 * label smoothing. Deep data (d.shallow = 0).
 *
 * Fix: the fread() return value was ignored, so a truncated batch file
 * silently produced garbage rows; a short read now aborts via
 * file_error(), matching the fopen failure path. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;

    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff); /* truncated batch */
            int class = bytes[0];
            y.vals[i+b*10000][class] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    scale_data_rows(d, 1./255); /* bytes -> [0, 1] */
    smooth_data(d);
    return d;
}
/* Load a Go move dataset: records alternate between a "row col" move line
 * and a 361-character board line ('1' = own stone -> +1, '2' = opponent
 * -> -1, otherwise 0). X holds boards, y holds one-hot move targets; both
 * matrices grow by doubling and are trimmed to the final count.
 *
 * Fixes over the original:
 *   - a truncated file (move line without a board line) dereferenced the
 *     NULL result of the second fgetl(); now stops cleanly.
 *   - an unparseable or out-of-range move wrote y out of bounds
 *     (index = row*19 + col was never validated); malformed records are
 *     now skipped.
 *   - a board line shorter than 361 characters was read past its end;
 *     such records are also skipped. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            /* amortized growth: double capacity when full */
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        char *board = fgetl(fp);
        if(!board){
            /* truncated file: move line with no board line */
            free(label);
            break;
        }
        if(sscanf(label, "%d %d", &row, &col) != 2
                || row < 0 || row >= 19 || col < 0 || col >= 19
                || strlen(board) < 19*19){
            /* malformed record: skip instead of writing out of bounds */
            free(label);
            free(board);
            continue;
        }
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);

    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/* Shuffle the rows of d in place (X and y stay paired) using the
 * Fisher-Yates algorithm.
 *
 * Fix: the original drew `index = rand()%i`, which never allows element i
 * to stay in place -- that is Sattolo's algorithm and only ever produces
 * full-cycle permutations, a biased shuffle. Fisher-Yates requires the
 * swap target to be drawn from [0, i] inclusive: rand()%(i+1).
 * (Residual modulo bias of rand()% is negligible for typical row counts.) */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = rand()%(i+1); /* uniform over [0, i], including i itself */
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;

        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every input row of d by the scalar s, in place. */
void scale_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        scale_array(d.X.vals[row], d.X.cols, s);
    }
}
/* Add the scalar s to every element of every input row of d, in place. */
void translate_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        translate_array(d.X.vals[row], d.X.cols, s);
    }
}
/* Deep-copy a dataset: X and y get fresh storage via copy_matrix, so the
 * result owns its rows (c.shallow = 0). Box metadata is carried over;
 * note c.boxes is a pointer copy, aliasing d's boxes. */
data copy_data(data d)
{
    data c = {
        .shallow = 0,
        .w = d.w,
        .h = d.h,
        .num_boxes = d.num_boxes,
        .boxes = d.boxes,
    };
    c.X = copy_matrix(d.X);
    c.y = copy_matrix(d.y);
    return c;
}
/* Normalize every input row of d in place via normalize_array. */
void normalize_data_rows(data d)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        normalize_array(d.X.vals[row], d.X.cols);
    }
}
/* Return a shallow view of part `part` of `total` roughly equal slices of
 * d: the vals pointers point into d's arrays, nothing is copied, and the
 * slice sizes sum exactly to the row counts. d must outlive the view. */
data get_data_part(data d, int part, int total)
{
data p = {0};
p.shallow = 1;
p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
p.X.cols = d.X.cols;
p.y.cols = d.y.cols;
p.X.vals = d.X.vals + d.X.rows * part / total;
p.y.vals = d.y.vals + d.y.rows * part / total;
return p;
}
/* Return a dataset of num rows sampled uniformly at random (with
 * replacement) from d. Row pointers alias d's rows (r.shallow = 1), but
 * the vals arrays themselves are freshly allocated -- freeing follows the
 * project's shallow free_data convention. d must outlive the result. */
data get_random_data(data d, int num)
{
data r = {0};
r.shallow = 1;
r.X.rows = num;
r.y.rows = num;
r.X.cols = d.X.cols;
r.y.cols = d.y.cols;
r.X.vals = calloc(num, sizeof(float *));
r.y.vals = calloc(num, sizeof(float *));
int i;
for(i = 0; i < num; ++i){
int index = rand()%d.X.rows;
r.X.vals[i] = d.X.vals[index];
r.y.vals[i] = d.y.vals[index];
}
return r;
}
/* Split d into {train, test}: rows [start, end) -- the part'th of total
 * slices -- become the test set, everything else the train set. Returns
 * a heap array of two shallow datasets (row pointers alias d, so d must
 * outlive them); caller frees the array and the vals arrays. */
data *split_data(data d, int part, int total)
{
data *split = calloc(2, sizeof(data));
int i;
int start = part*d.X.rows/total;
int end = (part+1)*d.X.rows/total;
data train;
data test;
train.shallow = test.shallow = 1;
test.X.rows = test.y.rows = end-start;
train.X.rows = train.y.rows = d.X.rows - (end-start);
train.X.cols = test.X.cols = d.X.cols;
train.y.cols = test.y.cols = d.y.cols;
train.X.vals = calloc(train.X.rows, sizeof(float*));
test.X.vals = calloc(test.X.rows, sizeof(float*));
train.y.vals = calloc(train.y.rows, sizeof(float*));
test.y.vals = calloc(test.y.rows, sizeof(float*));
/* rows before the test slice -> train */
for(i = 0; i < start; ++i){
train.X.vals[i] = d.X.vals[i];
train.y.vals[i] = d.y.vals[i];
}
/* the test slice itself */
for(i = start; i < end; ++i){
test.X.vals[i-start] = d.X.vals[i];
test.y.vals[i-start] = d.y.vals[i];
}
/* rows after the test slice -> train, shifted down by the slice size */
for(i = end; i < d.X.rows; ++i){
train.X.vals[i-(end-start)] = d.X.vals[i];
train.y.vals[i-(end-start)] = d.y.vals[i];
}
split[0] = train;
split[1] = test;
return split;
}
|
omp_rr.c | /*
Ubuntu:
-------
gcc -I/home/heiland/dev/libRR_1.3/include/rr/C -fopenmp omp_rr.c -L/home/heiland/dev/libRR_1.3/lib -lroadrunner_c_api -o omp_rr
OSX:
-----
#CFLAGS := -march=$(ARCH) -g -fomit-frame-pointer -Xpreprocessor -fopenmp -m64 -std=c++11
clang -Xpreprocessor -fopenmp -m64 -I/Users/heiland/dev/roadrunner-osx-10.9-cp36m/include/rr/C -L/Users/heiland/dev/roadrunner-osx-10.9-cp36m/lib -lroadrunner_c_api -L/usr/local/opt/libomp/lib -lomp omp_rr.c -o omp_rr
export DYLD_LIBRARY_PATH=/Users/heiland/dev/roadrunner-osx-10.9-cp36m/lib
vs. (not necessary to point to libomp)
export DYLD_LIBRARY_PATH=/Users/heiland/dev/roadrunner-osx-10.9-cp36m/lib:/usr/local/opt/libomp/lib
http://sys-bio.github.io/roadrunner/c_api_docs/html/group__helper_routines.html
http://sys-bio.github.io/roadrunner/c_api_docs/html/rrc__api_8h.html
http://sys-bio.github.io/roadrunner/c_api_docs/html/group__initial_conditions.html
clang -I/Users/heiland/dev/roadrunner-osx-10.9-cp36m/include/rr/C -L/Users/heiland/dev/roadrunner-osx-10.9-cp36m/lib -lroadrunner_c_api -L/usr/local/opt/libomp/lib -lomp omp_rr.c -o omp_rr
*/
#undef __cplusplus
#define STATIC_RRC
#include <stdio.h>
#include <stdlib.h>
#include "rrc_api.h"
#include "rrc_types.h"
#include "rrc_utilities.h"
#include <omp.h>
/*
 * Run ncells independent RoadRunner SBML simulations, shared across
 * nthreads OpenMP threads. Each thread owns its own RRHandle, so the
 * per-cell work is fully independent.
 *
 * Fixes over the original:
 *   - The cell loop used "#pragma omp parallel for" INSIDE the enclosing
 *     "#pragma omp parallel" region. That nests a second parallel region
 *     per thread; with nested parallelism disabled (the OpenMP default)
 *     every outer thread executed ALL ncells iterations, duplicating the
 *     whole workload nthreads times. "#pragma omp for" work-shares the
 *     iterations across the existing team instead, which is what the
 *     per-thread handle declarations inside the region were set up for.
 *   - The usage message promises argv[1] is the SBML file, but loadSBML
 *     was called with a hard-coded "feedback.xml"; argv[1] is now used.
 *   - The SBML-load failure path exited with status 0 (success); it now
 *     exits with status 1.
 */
int main (int argc, char *argv[])
{
    if(argc < 4) {
        printf("Provide args: <name of sbml file> <num cells> <num threads>\n");
        exit(1);
    }

    int ncells = atoi(argv[2]);
    printf("------ # of cells = %d\n\n", ncells);
    int nthreads = atoi(argv[3]);
    printf("------ # of threads = %d\n\n", nthreads);
    omp_set_num_threads(nthreads);

    #pragma omp parallel
    {
        /* Per-thread state: each thread creates and frees its own
         * RoadRunner instance per cell, so nothing here is shared. */
        RRHandle rrHandle;
        RRVectorPtr vptr;
        RRCDataPtr result;  // start time, end time, and number of points

        static int idx_oxygen = 3;  /* static => shared, but only ever read */

        /* Work-share the cell loop across the team created above. */
        #pragma omp for
        for (int icell=0; icell<ncells; icell++)
        {
            printf ("------------ cell %d ---------------\n", icell);
            rrHandle = createRRInstance();
            if (!loadSBML (rrHandle, argv[1])) {
                printf ("Error while loading SBML file\n");
                printf ("Error message: %s\n", getLastError());
                getchar ();
                exit (1);   /* error: non-zero status */
            }

            /* Report basic model structure. */
            int r = getNumberOfReactions(rrHandle);
            int m = getNumberOfFloatingSpecies(rrHandle);
            int b = getNumberOfBoundarySpecies(rrHandle);
            int p = getNumberOfGlobalParameters(rrHandle);
            int c = getNumberOfCompartments(rrHandle);
            printf ("Number of reactions = %d\n", r);
            printf ("Number of floating species = %d\n", m);  // 4
            printf ("Number of boundary species = %d\n", b);  // 0
            printf ("Number of compartments = %d\n", c);      // 1

            printf ("Floating species names:\n");
            printf ("-----------------------\n");
            printf("%s\n\n",stringArrayToString(getFloatingSpeciesIds(rrHandle)));

            printf ("Floating species conc:\n");
            printf ("-------------------------------\n");
            vptr = getFloatingSpeciesConcentrations(rrHandle);
            printf("%d\n",vptr->Count);
            for (int idx=0; idx<vptr->Count; idx++)
                printf("%d %f\n",idx, vptr->Data[idx]);

            /* Perturb the oxygen species and write the vector back. */
            vptr->Data[idx_oxygen] += 0.1;
            setFloatingSpeciesConcentrations(rrHandle, vptr);

            vptr = getFloatingSpeciesConcentrations(rrHandle);
            printf("%d\n",vptr->Count);
            for (int idx=0; idx<vptr->Count; idx++)
                printf("%d %f\n",idx, vptr->Data[idx]);

            result = simulateEx (rrHandle, 0, 10, 10);  // start time, end time, and number of points

            /* Print column headers (typically time and species). */
            int index = 0;
            for (int col = 0; col < result->CSize; col++)
            {
                printf ("%10s", result->ColumnHeaders[index++]);
                if (col < result->CSize - 1)
                {
                    printf ("\t");
                }
            }
            printf ("\n");

            /* Print the simulation data, row by row. */
            index = 0;
            for (int row = 0; row < result->RSize; row++)
            {
                for (int col = 0; col < result->CSize; col++)
                {
                    printf ("%10f", result->Data[index++]);
                    if (col < result->CSize -1)
                    {
                        printf ("\t");
                    }
                }
                printf ("\n");
            }

            /* Cleanup */
            freeRRCData (result);
            freeRRInstance (rrHandle);
        } // end for
    } // end pragma
    exit (0);
}
|
iSENSE_MEX.c | /**************************************************************************
MEX function to compute the approximate gradient of the absolute value
Author: R. Marc Lebel
Contact: mlebel@gmail.com
Date: 11/2010
Useage: imgS = absgradMEX(img,sens)
Input:
img: numeric array (single/double; real/complex)
sens: sensitivity maps
Output:
imgS: multichanel image
**************************************************************************/
#include "mex.h"
#include <math.h>
#include <string.h>
#include "fast_mxArray_setup.c"
#ifdef __GNU__
#include <omp.h>
#endif
#ifndef MAXCORES
#define MAXCORES 1
#endif
/* MEX entry point: coil-combine a multichannel image.
 * right[0]: complex multichannel image, np x nv x ns x nt x nr
 * right[1]: complex coil sensitivity maps, np x nv x ns x nr
 * right[2]: 5-element size vector [np nv ns nt nr]
 * left[0]: complex combined image, np x nv x ns x nt -- the receiver (nr)
 * dimension is collapsed by accumulating conj(sens) * img per channel.
 * Both double and single precision are supported. */
void mexFunction(int nlhs, mxArray *left[], int nrhs, const mxArray *right[]) {
/* Declare variables */
mwSize i, j, k, t, r;
mwSize np, nv, ns, nt, nr;
mwSize indS, indIMG, indIMGS, indXY, indXYZ, indXYZT, indT1, indT2, indT3;
mwSize nD, *sizeOUT;
mxClassID precision;
double *pSZd, *pIMGrd, *pIMGid, *pSrd, *pSid, *pIMGSrd, *pIMGSid;
float *pSZf, *pIMGrf, *pIMGif, *pSrf, *pSif, *pIMGSrf, *pIMGSif;
/* Get size */
nD = 4;
/*mexPrintf("nD: %i\n",nD);*/
/* The size vector may itself be double or single precision. */
if (mxGetClassID(right[2]) == mxDOUBLE_CLASS) {
pSZd = mxGetPr(right[2]);
np = (int)pSZd[0]; nv = (int)pSZd[1]; ns = (int)pSZd[2]; nt = (int)pSZd[3]; nr = (int)pSZd[4];
}
else {
pSZf = mxGetData(right[2]);
np = (int)pSZf[0]; nv = (int)pSZf[1]; ns = (int)pSZf[2]; nt = (int)pSZf[3]; nr = (int)pSZf[4];
}
/*mexPrintf("size: %i x %i x %i x %i x %i\n",np,nv,ns,nt,nr);*/
/* Perform strange memory copy to replicate the size (needed for create_array_d/f) */
/* Output is 4-D: the receiver dimension nr is summed away. */
sizeOUT = (mwSize *)mxMalloc(nD*sizeof(mwSize));
sizeOUT[0] = np; sizeOUT[1] = nv; sizeOUT[2] = ns; sizeOUT[3] = nt;
/*mexPrintf("sizeOUT: %i x %i x %i x %i\n",sizeOUT[0],sizeOUT[1],sizeOUT[2],sizeOUT[3]);*/
/* Test for complex and obtain data class */
if (!(mxIsComplex(right[0]) & mxIsComplex(right[1])))
mexErrMsgTxt("Inputs need to be complex");
precision = mxGetClassID(right[0]);
/* Get pointers to input array and create output */
if (precision == mxDOUBLE_CLASS) {
pIMGSrd = mxGetPr(right[0]);
pIMGSid = mxGetPi(right[0]);
pSrd = mxGetPr(right[1]);
pSid = mxGetPi(right[1]);
/* Create output and assign pointers */
/*create_array_d(&(left[0]), &pIMGrd, &pIMGid, nD, sizeOUT, mxCOMPLEX, 0);*/
left[0] = mxCreateNumericArray(nD,sizeOUT,precision,mxCOMPLEX);
pIMGrd = mxGetPr(left[0]);
pIMGid = mxGetPi(left[0]);
}
else {
pIMGSrf = mxGetData(right[0]);
pIMGSif = mxGetImagData(right[0]);
pSrf = mxGetData(right[1]);
pSif = mxGetImagData(right[1]);
/* Create output and assign pointers */
/*create_array_f(&(left[0]), &pIMGrf, &pIMGif, nD, sizeOUT, mxCOMPLEX, 0);*/
left[0] = mxCreateNumericArray(nD,sizeOUT,precision,mxCOMPLEX);
pIMGrf = mxGetData(left[0]);
pIMGif = mxGetImagData(left[0]);
}
/* NOTE(review): guard macro is __GNU__, not the compiler-defined __GNUC__;
 * unless the build defines __GNU__ explicitly, OpenMP setup is never
 * compiled -- confirm against the build flags. */
#ifdef __GNU__
/* Set number of threads */
omp_set_num_threads(MAXCORES);
#endif
/* Loop through elements */
/* Precomputed strides for column-major (MATLAB) indexing. */
indXY = np*nv;
indXYZ = np*nv*ns;
indXYZT = np*nv*ns*nt;
if (precision == mxDOUBLE_CLASS) {
/*#pragma omp parallel for private(i,j,k,t,r,indT1,indT2,indT3,indIMG,indIMGS,indS) reduction(+: pIMGrd, pIMGid)*/
for (r=0; r<nr; r++) {
for (t=0; t<nt; t++) {
for (k=0; k<ns; k++) {
for (j=0; j<nv; j++) {
indT1 = t*indXYZ + k*indXY + j*np;
indT2 = r*indXYZT + t*indXYZ + k*indXY + j*np;
indT3 = r*indXYZ + k*indXY + j*np;
for (i=0; i<np; i++) {
indIMGS = indT2 + i;
indIMG = indT1 + i;
indS = indT3 + i;
/* accumulate conj(sens) * img into the output voxel */
pIMGrd[indIMG] += pIMGSrd[indIMGS]*pSrd[indS] + pIMGSid[indIMGS]*pSid[indS];
pIMGid[indIMG] += pIMGSid[indIMGS]*pSrd[indS] - pIMGSrd[indIMGS]*pSid[indS];
}
}
}
}
}
}
else {
/*#pragma omp parallel for private(i,j,k,t,r,indT1,indT2,indT3,indIMG,indIMGS,indS)*/
for (r=0; r<nr; r++) {
for (t=0; t<nt; t++) {
for (k=0; k<ns; k++) {
for (j=0; j<nv; j++) {
indT1 = t*indXYZ + k*indXY + j*np;
indT2 = r*indXYZT + t*indXYZ + k*indXY + j*np;
indT3 = r*indXYZ + k*indXY + j*np;
for (i=0; i<np; i++) {
indIMGS = indT2 + i;
indIMG = indT1 + i;
indS = indT3 + i;
/* accumulate conj(sens) * img into the output voxel */
pIMGrf[indIMG] += pIMGSrf[indIMGS]*pSrf[indS] + pIMGSif[indIMGS]*pSif[indS];
pIMGif[indIMG] += pIMGSif[indIMGS]*pSrf[indS] - pIMGSrf[indIMGS]*pSif[indS];
}
}
}
}
}
}
/* Free memory */
mxFree(sizeOUT);
}
|
DRB040-truedepsingleelement-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Data race pair: a[i]@63:5 vs. a[0]@63:15
*/
#include <stdlib.h>
/* DataRaceBench kernel DRB040 ("truedepsingleelement-var-YES"):
 * this program contains an INTENTIONAL data race and must not be "fixed".
 * Every iteration of the parallel loop reads a[0] while the iteration
 * with i == 0 writes it -- the documented race pair is the a[i] write
 * vs. the a[0] read. The benchmark exists to exercise race detectors. */
int main (int argc, char* argv[])
{
int len=1000;
int i;
if (argc>1)
len = atoi(argv[1]);
int a[len];
a[0] = 2;
#pragma omp parallel for
for (i=0;i<len;i++)
a[i]=a[i]+a[0]; /* racy read of a[0] while the thread running i==0 writes it */
return 0;
}
|
VolumetricMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricMaxUnpooling.c"
#else
/* Scatter one frame's input values into a (pre-zeroed) larger output:
 * for each input voxel, the stored max position (packed as three bytes
 * z/y/x inside the corresponding indices value) is added to the window
 * origin (stride minus padding) and the input value is written there.
 * Parallelized over channel slices; aborts via THError on an index that
 * falls outside the output volume. */
static void nn_(VolumetricMaxUnpooling_updateOutput_frame)(real *input_p, real *output_p,
real *ind_p,
long nslices,
long itime, long iwidth, long iheight,
long otime, long owidth, long oheight,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
long ti, i, j, maxz, maxy, maxx;
for(ti = 0; ti < itime; ti++)
{
for(i = 0; i < iheight; i++)
{
for(j = 0; j < iwidth; j++)
{
/* window origin in the output volume for this input voxel */
long start_t = ti * dT - padT;
long start_h = i * dH - padH;
long start_w = j * dW - padW;
//real *output_p_k = output_p + k*otime*owidth*oheight + ti*owidth*oheight*dT + i*owidth*dH + j*dW;
real *input_p_k = input_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
real *ind_p_k = ind_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
/* max position is byte-packed inside the index value: [z, y, x] */
maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */
maxy = ((unsigned char*)(ind_p_k))[1];
maxx = ((unsigned char*)(ind_p_k))[2];
if(start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 || start_t+maxz>=otime || start_h+maxy>=oheight || start_w+maxx>=owidth)
{
THError("invalid max index z= %d, y= %d, x= %d, otime= %d, owidth= %d, oheight= %d", start_t+maxz, start_h+maxy, start_w+maxx, otime, owidth, oheight);
}
output_p[k*otime*owidth*oheight + oheight*owidth*(start_t+maxz) + owidth*(start_h+maxy) + (start_w+maxx)] = *input_p_k; /* update output */
}
}
}
}
}
/* Lua-facing forward pass for VolumetricMaxUnpooling.
 * Validates that input is 4-D (C,T,H,W) or 5-D batch (N,C,T,H,W) and that
 * the indices tensor matches the input's shape, zero-fills the output at
 * the configured (otime, oheight, owidth), and scatters values via
 * updateOutput_frame -- per frame, parallel over the batch for 5-D input.
 * Returns 1 (the output tensor stays on the Lua stack via "output"). */
static int nn_(VolumetricMaxUnpooling_updateOutput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
int otime = luaT_getfieldcheckint(L, 1, "otime");
int owidth = luaT_getfieldcheckint(L, 1, "owidth");
int oheight = luaT_getfieldcheckint(L, 1, "oheight");
int dT = luaT_getfieldcheckint(L, 1, "dT");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int padT = luaT_getfieldcheckint(L, 1, "padT");
int padH = luaT_getfieldcheckint(L, 1, "padH");
int padW = luaT_getfieldcheckint(L, 1, "padW");
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int itime;
int iheight;
int iwidth;
real *input_data;
real *output_data;
real *indices_data;
luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5 , 2, "4D or 5D (batch mode) tensor expected");
if (!THTensor_(isSameSizeAs)(input, indices)){
THError("Invalid input size w.r.t current indices size");
}
if (input->nDimension == 5)
{
/* batch mode: shift the t/h/w dimension indices past the batch dim */
nbatch = input->size[0];
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size[dimt-1];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
/* get contiguous input */
input = THTensor_(newContiguous)(input);
indices = THTensor_(newContiguous)(indices);
/* resize output */
if (input->nDimension == 4)
{
THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
THTensor_(zero)(output); /* scatter only writes max positions */
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THTensor_(data)(indices);
nn_(VolumetricMaxUnpooling_updateOutput_frame)(input_data, output_data,
indices_data,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
dT, dW, dH, padT, padW, padH);
}
else
{
long p;
THTensor_(resize5d)(output, nbatch, nslices, otime, oheight, owidth);
THTensor_(zero)(output); /* scatter only writes max positions */
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THTensor_(data)(indices);
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
nn_(VolumetricMaxUnpooling_updateOutput_frame)(input_data+p*nslices*itime*iwidth*iheight, output_data+p*nslices*otime*owidth*oheight,
indices_data+p*nslices*itime*iwidth*iheight,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
dT, dW, dH, padT, padW, padH);
}
}
/* cleanup */
THTensor_(free)(input);
THTensor_(free)(indices);
return 1;
}
/* Backward frame kernel: for every input cell, read the packed (z,y,x) max
 * offset from ind_p and pull the corresponding gradient value back from
 * gradOutput. One frame = nslices x itime x iheight x iwidth values.
 * Slices are processed in parallel; each (k,ti,i,j) cell is written once. */
static void nn_(VolumetricMaxUnpooling_updateGradInput_frame)(real *gradInput_p, real *gradOutput_p,
                                                              real *ind_p,
                                                              long nslices,
                                                              long itime, long iwidth, long iheight,
                                                              long otime, long owidth, long oheight,
                                                              int dT, int dW, int dH,
                                                              int padT, int padW, int padH)
{
  long k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    long ti, i, j, maxz, maxy, maxx;
    for(ti = 0; ti < itime; ti++)
    {
      for(i = 0; i < iheight; i++)
      {
        for(j = 0; j < iwidth; j++)
        {
          /* top-left-front corner of the pooling window this cell came from */
          long start_t = ti * dT - padT;
          long start_h = i * dH - padH;
          long start_w = j * dW - padW;
          real *gradInput_p_k = gradInput_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
          real *ind_p_k = ind_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
          /* the max position was packed byte-wise into the index value */
          maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */
          maxy = ((unsigned char*)(ind_p_k))[1];
          maxx = ((unsigned char*)(ind_p_k))[2];
          if(start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 || start_t+maxz>=otime || start_h+maxy>=oheight || start_w+maxx>=owidth)
          {
            /* all printed values are long, so use %ld (passing long for %d is
               undefined behavior in printf-style varargs) */
            THError("invalid max index z= %ld, y= %ld, x= %ld, otime= %ld, owidth= %ld, oheight= %ld", start_t+maxz, start_h+maxy, start_w+maxx, otime, owidth, oheight);
          }
          *gradInput_p_k = gradOutput_p[k*otime*owidth*oheight + oheight*owidth*(start_t+maxz) + owidth*(start_h+maxy) + (start_w+maxx)]; /* update gradient */
        }
      }
    }
  }
}
/* Lua entry point: backward pass of VolumetricMaxUnpooling.
 * Stack: self, input tensor, gradOutput tensor. Routes each gradOutput value
 * back to the input cell whose max produced it (using self.indices).
 * Returns 1; result is in self.gradInput. */
static int nn_(VolumetricMaxUnpooling_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
  int otime = luaT_getfieldcheckint(L, 1, "otime");
  int owidth = luaT_getfieldcheckint(L, 1, "owidth");
  int oheight = luaT_getfieldcheckint(L, 1, "oheight");
  int dT = luaT_getfieldcheckint(L, 1, "dT");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int padT = luaT_getfieldcheckint(L, 1, "padT");
  int padH = luaT_getfieldcheckint(L, 1, "padH");
  int padW = luaT_getfieldcheckint(L, 1, "padW");
  /* dimension positions for 4D input; shifted by one in 5D batch mode */
  int dimw = 3;
  int dimh = 2;
  int dimt = 1;
  int nbatch = 1;
  int nslices;
  int itime;
  int iheight;
  int iwidth;
  real *gradInput_data;
  real *gradOutput_data;
  real *indices_data;
  /* same dimensionality check as updateOutput, before input->size is read */
  luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2, "4D or 5D (batch mode) tensor expected");
  if (!THTensor_(isSameSizeAs)(input, indices)){
    THError("Invalid input size w.r.t current indices size");
  }
  /* get contiguous gradOutput; copies released by THTensor_(free) below */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  indices = THTensor_(newContiguous)(indices);
  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  if (input->nDimension == 5) {
    nbatch = input->size[0];
    dimt++;
    dimw++;
    dimh++;
  }
  /* sizes */
  nslices = input->size[dimt-1];
  itime = input->size[dimt];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  if(otime!=gradOutput->size[dimt] || owidth!=gradOutput->size[dimw] || oheight!=gradOutput->size[dimh]){
    /* tensor sizes are long: print them with %ld (long for %d is undefined
       behavior in varargs formatting) */
    THError("Inconsistent gradOutput size. otime= %d, oheight= %d, owidth= %d, gradOutput: %ldx%ld", otime, oheight, owidth, gradOutput->size[dimh], gradOutput->size[dimw]);
  }
  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THTensor_(data)(indices);
  /* backprop */
  if (input->nDimension == 4)
  {
    nn_(VolumetricMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                      indices_data,
                                                      nslices,
                                                      itime, iwidth, iheight,
                                                      otime, owidth, oheight,
                                                      dT, dW, dH,
                                                      padT, padW, padH);
  }
  else
  {
    long p;
    /* one independent frame per batch element */
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      nn_(VolumetricMaxUnpooling_updateGradInput_frame)(gradInput_data+p*nslices*itime*iwidth*iheight, gradOutput_data+p*nslices*otime*owidth*oheight,
                                                        indices_data+p*nslices*itime*iwidth*iheight,
                                                        nslices,
                                                        itime, iwidth, iheight,
                                                        otime, owidth, oheight,
                                                        dT, dW, dH,
                                                        padT, padW, padH);
    }
  }
  /* cleanup */
  THTensor_(free)(gradOutput);
  THTensor_(free)(indices);
  return 1;
}
/* Lua method table registered on the torch.Tensor metatable under "nn". */
static const struct luaL_Reg nn_(VolumetricMaxUnpooling__) [] = {
  {"VolumetricMaxUnpooling_updateOutput", nn_(VolumetricMaxUnpooling_updateOutput)},
  {"VolumetricMaxUnpooling_updateGradInput", nn_(VolumetricMaxUnpooling_updateGradInput)},
  {NULL, NULL}  /* sentinel terminating the registration list */
};
/* Registers the method table above on the torch.Tensor metatable under "nn". */
static void nn_(VolumetricMaxUnpooling_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(VolumetricMaxUnpooling__), "nn");
  lua_pop(L,1);  /* pop the metatable pushed above */
}
#endif
|
TRPO_CG.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "omp.h"
#include "TRPO.h"
double CG(TRPOparam param, double *Result, double *b, size_t MaxIter, double ResidualTh, size_t NumThreads){

    //////////////////// Conjugate Gradient ////////////////////

    // Solves the linear system Ax=b, where A is the Fisher Information Matrix
    // applied implicitly through FVPFast (z = FIM*p each iteration).
    // Result:     receives the solution x
    // b:          right-hand-side vector of length NumParams
    // MaxIter:    maximum CG iterations (modular_rl uses 10)
    // ResidualTh: residual threshold (modular_rl uses 1e-10)
    // Returns the accumulated computation time in seconds, or -1 on failure
    // (allocation failure or a failed Fisher-vector product).

    // OpenMP Settings
    omp_set_num_threads(NumThreads);

    // Memory Allocation
    size_t NumParams = NumParamsCalc(param.LayerSize, param.NumLayers);
    double * p = (double *) calloc(NumParams, sizeof(double));
    double * r = (double *) calloc(NumParams, sizeof(double));
    double * x = (double *) calloc(NumParams, sizeof(double));
    double * z = (double *) calloc(NumParams, sizeof(double));
    // Abort early if any allocation failed (CERT MEM32-C); free(NULL) is a no-op
    if (p==NULL || r==NULL || x==NULL || z==NULL) {
        fprintf(stderr, "[ERROR] Conjugate Gradient: memory allocation failed.\n");
        free(p); free(r); free(x); free(z);
        return -1;
    }

    // Initialisation: x = 0 (from calloc), r = p = b, rdotr = r.r
    double rdotr = 0;
    for (size_t i=0; i<NumParams; ++i) {
        p[i] = b[i];
        r[i] = b[i];
        rdotr += r[i] * r[i];
    }

    // Iterative Solver
    // Measure Elapsed Time
    struct timeval tv1, tv2;
    double ComptimeS = 0;
    for (size_t iter=0; iter<=MaxIter; ++iter) {

        // Calculate Frobenius Norm of x (diagnostic only)
        double FrobNorm = 0;
        gettimeofday(&tv1, NULL);
        #pragma omp parallel for reduction (+:FrobNorm)
        for (size_t i=0; i<NumParams; ++i) {
            FrobNorm += x[i] * x[i];
        }
        FrobNorm = sqrt(FrobNorm);
        gettimeofday(&tv2, NULL);

        printf("CG Iter[%zu] Residual Norm=%.12e, Soln Norm=%.12e\n", iter, rdotr, FrobNorm);

        // Check Termination Condition: converged, or out of iterations
        if (rdotr<ResidualTh || iter==MaxIter) {
            for (size_t i=0; i<NumParams; ++i) Result[i] = x[i];
            break;
        }

        // Calculate z = FIM*p
        double FVPTime = FVPFast(param, z, p, NumThreads);
        if (FVPTime<0) {
            fprintf(stderr, "[ERROR] Fisher Vector Product Calculation Failed.\n");
            free(p); free(r); free(x); free(z);
            return -1;
        }
        else {
            ComptimeS += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
            ComptimeS += FVPTime;
        }

        // Update x and r: step size v = r.r / p.z
        double pdotz = 0;
        gettimeofday(&tv1, NULL);
        #pragma omp parallel for reduction (+:pdotz)
        for (size_t i=0; i<NumParams; ++i) {
            pdotz += p[i] * z[i];
        }
        double v = rdotr / pdotz;
        #pragma omp parallel for
        for (size_t i=0; i<NumParams; ++i) {
            x[i] += v * p[i];
            r[i] -= v * z[i];
        }

        // Update search direction p with conjugacy factor mu
        double newrdotr = 0;
        #pragma omp parallel for reduction (+:newrdotr)
        for (size_t i=0; i<NumParams; ++i) {
            newrdotr += r[i] * r[i];
        }
        double mu = newrdotr / rdotr;
        #pragma omp parallel for
        for (size_t i=0; i<NumParams; ++i) {
            p[i] = r[i] + mu * p[i];
        }

        // Update rdotr
        rdotr = newrdotr;
        gettimeofday(&tv2, NULL);
        ComptimeS += ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
    }

    // Clean Up
    free(p); free(r); free(x); free(z);
    return ComptimeS;
}
|
20_omp_priv_combi_nested.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S 2>&1 | %filecheck %s --check-prefix=check-opt
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S | %filecheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S | %filecheck %s --check-prefix=check-opt-inst
// REQUIRES: openmp
// clang-format on
#include "omp.h"
// NOTE: with opt, the compiler passes the address until the MPI_Send, hence
// only the initial allocation is tracked.
extern void MPI_Send(void*, int);
// Test fixture: x is privatized (first/lastprivate) and escapes to MPI inside
// the outlined OpenMP region, so its private copy must be stack-tracked there.
void func(int* x, int* e) {
  // check-inst: define {{.*}} @func
  // check-inst-NOT: call void @__typeart_alloc_stack
  // check-opt-inst: define {{.*}} @func
  // check-opt-inst-NOT: call void @__typeart_alloc_stack
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1)
  // check-opt-inst: define {{.*}} @.omp_outlined
  // check-opt-inst-NOT: call void @__typeart_alloc_stack_omp
#pragma omp parallel for firstprivate(x), lastprivate(x), shared(e)
  for (int i = 0; i < 10; ++i) {
    // Analysis should not filter x, but e...
    MPI_Send((void*)x, *e);
  }
}
// Test driver: stack variables passed (via func) into a parallel region must
// be tracked with __typeart_alloc_stack in the enclosing function.
void foo() {
  // check-inst: define {{.*}} @foo
  // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  // check-opt-inst: define {{.*}} @foo
  // check-opt-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  int x = 1;
  int y = 2;
#pragma omp parallel
  { func(&x, &y); }
}
// Variant of func: x additionally escapes via MPI_Send outside the parallel
// region; the tracking expectations for the outlined region are the same.
void func_other(int* x, int* e) {
  // check-inst: define {{.*}} @func_other
  // check-inst-NOT: call void @__typeart_alloc_stack
  // check-opt-inst: define {{.*}} @func_other
  // check-opt-inst-NOT: call void @__typeart_alloc_stack
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1)
  // check-opt-inst: define {{.*}} @.omp_outlined
  // check-opt-inst-NOT: call void @__typeart_alloc_stack_omp
#pragma omp parallel for firstprivate(x), lastprivate(x), shared(e)
  for (int i = 0; i < 10; ++i) {
    // Analysis should not filter x, but e...
    MPI_Send(x, *e);
  }
  MPI_Send(x, *e);
}
// Second driver: same shape as foo but x is initialized from a parameter;
// the stack-tracking expectations are identical.
void bar(int x_other) {
  // check-inst: define {{.*}} @bar
  // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  // check-opt-inst: define {{.*}} @bar
  // check-opt-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  int x = x_other;
  int y = 2;
#pragma omp parallel
  { func_other(&x, &y); }
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 4
// CHECK-NEXT: Global : 0
// check-opt: TypeArtPass [Heap & Stack]
// check-opt: Malloc : 0
// check-opt: Free : 0
// check-opt: Alloca : 2
// check-opt: Global : 0 |
Diffusion_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Diffusion_core.h"
#include "utils.h"
#define EPS 1.0e-5
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
/*sign function*/
/* Sign of x: +1 for positive, -1 for negative, 0 for zero (and for -0.0f). */
int signNDFc(float x) {
    if (x > 0.0f)
        return 1;
    if (x < 0.0f)
        return -1;
    return 0;
}
/* C-OMP implementation of linear and nonlinear diffusion with the regularisation model [1,2] (2D/3D case)
* The minimisation is performed using explicit scheme.
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambda - regularization parameter
* 3. Edge-preserving parameter (sigma), when sigma equals to zero nonlinear diffusion -> linear diffusion
* 4. Number of iterations, for explicit scheme >= 150 is recommended
* 5. tau - time-marching step for explicit scheme
* 6. Penalty type: 1 - Huber, 2 - Perona-Malik, 3 - Tukey Biweight, 4 - Threshold-constrained Linear, , 5 - modified Huber with a dead stop on edge
* 7. eplsilon - tolerance constant
*
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the paper by
* [1] Perona, P. and Malik, J., 1990. Scale-space and edge detection using anisotropic diffusion. IEEE Transactions on pattern analysis and machine intelligence, 12(7), pp.629-639.
* [2] Black, M.J., Sapiro, G., Marimont, D.H. and Heeger, D., 1998. Robust anisotropic diffusion. IEEE Transactions on image processing, 7(3), pp.421-432.
*/
/* Explicit-scheme diffusion driver: 2D when dimZ==1, else 3D.
 * sigmaPar==0 selects linear diffusion (heat equation); otherwise penaltytype
 * chooses the edge-stopping function (1..5, see kernel implementations).
 * epsil!=0 enables an early-stop test every 5 iterations.
 * On exit: infovector[0] = iterations performed, infovector[1] = last relative
 * residual. Returns 0. */
float Diffusion_CPU_main(float *Input, float *Output, float *infovector, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int penaltytype, float epsil, int dimX, int dimY, int dimZ)
{
    int i;
    float sigmaPar2, *Output_prev=NULL;
    sigmaPar2 = sigmaPar/sqrt(2.0f);  /* scaled edge parameter for the nonlinear kernels */
    long j, DimTotal;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    int count = 0;
    DimTotal = (long)(dimX*dimY*dimZ);

    /* NOTE(review): calloc result is unchecked; a failed allocation would be
       dereferenced in copyIm below — confirm caller-side size guarantees. */
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));

    /* copy into output */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));

    for(i=0; i < iterationsNumb; i++) {
      /* snapshot every 5th iterate as the reference for the residual test */
      if ((epsil != 0.0f)  && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

      if (dimZ == 1) {
        /* running 2D diffusion iterations */
        if (sigmaPar == 0.0f) LinearDiff2D(Input, Output, lambdaPar, tau, (long)(dimX), (long)(dimY)); /* linear diffusion (heat equation) */
        else NonLinearDiff2D(Input, Output, lambdaPar, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY)); /* nonlinear diffusion */
      }
      else {
        /* running 3D diffusion iterations */
        if (sigmaPar == 0.0f) LinearDiff3D(Input, Output, lambdaPar, tau, (long)(dimX), (long)(dimY), (long)(dimZ));
        else NonLinearDiff3D(Input, Output, lambdaPar, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY), (long)(dimZ));
      }

      /* check early stopping criteria if epsilon not equal zero */
      if ((epsil != 0.0f)  && (i % 5 == 0)) {
        re = 0.0f; re1 = 0.0f;
        for(j=0; j<DimTotal; j++)
        {
          re += powf(Output[j] - Output_prev[j],2);
          re1 += powf(Output[j],2);
        }
        re = sqrtf(re)/sqrtf(re1);
        /* stop after the tolerance has been met more than 3 times */
        if (re < epsil)  count++;
        if (count > 3) break;
      }
    }
    free(Output_prev);  /* free(NULL) is a no-op when epsil == 0 */

    /*adding info into info_vector */
    infovector[0] = (float)(i);  /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re;  /* reached tolerance */
    return 0;
}
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/* linear diffusion (heat equation) */
/* One explicit linear-diffusion (heat equation) step over a 2D image.
 * Output is updated in place with symmetric (Neumann mirror) boundary
 * conditions; Input supplies the data-fidelity term.
 * Returns the first element of Output (kept for API compatibility). */
float LinearDiff2D(float *Input, float *Output, float lambdaPar, float tau, long dimX, long dimY)
{
#pragma omp parallel for shared(Input)
    for (long row = 0; row < dimY; row++) {
        /* mirrored row neighbours at the top/bottom edges */
        long rDown = (row + 1 == dimY) ? row - 1 : row + 1;
        long rUp   = (row == 0)        ? row + 1 : row - 1;
        for (long col = 0; col < dimX; col++) {
            /* mirrored column neighbours at the left/right edges */
            long cRight = (col + 1 == dimX) ? col - 1 : col + 1;
            long cLeft  = (col == 0)        ? col + 1 : col - 1;
            long idx = row * dimX + col;
            float east  = Output[row * dimX + cRight];
            float west  = Output[row * dimX + cLeft];
            float north = Output[rDown * dimX + col];
            float south = Output[rUp * dimX + col];
            float de = east  - Output[idx];
            float dw = west  - Output[idx];
            float dn = north - Output[idx];
            float ds = south - Output[idx];
            /* explicit update: diffusion term plus fidelity to Input */
            Output[idx] += tau * (lambdaPar * (de + dw + dn + ds) - (Output[idx] - Input[idx]));
        }
    }
    return *Output;
}
/* nonlinear diffusion */
/* One explicit nonlinear-diffusion step over a 2D image (in place).
 * penaltytype selects the edge-stopping function applied to each neighbour
 * difference: 1 Huber, 2 Perona-Malik, 3 Tukey biweight, 4 threshold-
 * constrained linear, 5 threshold-constrained Huber. Symmetric (Neumann)
 * boundaries. Returns the first element of Output (API compatibility). */
float NonLinearDiff2D(float *Input, float *Output, float lambdaPar, float sigmaPar, float tau, int penaltytype, long dimX, long dimY)
{
    long i,j,i1,i2,j1,j2,index;
    float e,w,n,s,e1,w1,n1,s1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1)
    for(j=0; j<dimY; j++) {
        /* symmetric boundary conditions (Neuman) */
        j1 = j+1; if (j1 == dimY) j1 = j-1;
        j2 = j-1; if (j2 < 0) j2 = j+1;
        for(i=0; i<dimX; i++) {
            /* symmetric boundary conditions (Neuman) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            i2 = i-1; if (i2 < 0) i2 = i+1;
            index = j*dimX+i;
            /* neighbour values and their differences to the centre pixel */
            e = Output[j*dimX+i1];
            w = Output[j*dimX+i2];
            n = Output[j1*dimX+i];
            s = Output[j2*dimX+i];
            e1 = e - Output[index];
            w1 = w - Output[index];
            n1 = n - Output[index];
            s1 = s - Output[index];
            /* NOTE(review): fabs (double) is used on float operands throughout;
               fabsf would avoid the float->double round-trips — confirm intent. */
            if (penaltytype == 1){
                /* Huber penalty */
                if (fabs(e1) > sigmaPar) e1 =  signNDFc(e1);
                else e1 = e1/sigmaPar;
                if (fabs(w1) > sigmaPar) w1 =  signNDFc(w1);
                else w1 = w1/sigmaPar;
                if (fabs(n1) > sigmaPar) n1 =  signNDFc(n1);
                else n1 = n1/sigmaPar;
                if (fabs(s1) > sigmaPar) s1 =  signNDFc(s1);
                else s1 = s1/sigmaPar;
            }
            else if (penaltytype == 2) {
                /* Perona-Malik */
                e1 /= (1.0f + powf((e1/sigmaPar),2));
                w1 /= (1.0f + powf((w1/sigmaPar),2));
                n1 /= (1.0f + powf((n1/sigmaPar),2));
                s1 /= (1.0f + powf((s1/sigmaPar),2));
            }
            else if (penaltytype == 3) {
                /* Tukey Biweight */
                if (fabs(e1) <= sigmaPar) e1 = e1*powf((1.0f - powf((e1/sigmaPar),2)), 2);
                else e1 = 0.0f;
                if (fabs(w1) <= sigmaPar) w1 = w1*powf((1.0f - powf((w1/sigmaPar),2)), 2);
                else w1 = 0.0f;
                if (fabs(n1) <= sigmaPar) n1 = n1*powf((1.0f - powf((n1/sigmaPar),2)), 2);
                else n1 = 0.0f;
                if (fabs(s1) <= sigmaPar) s1 = s1*powf((1.0f - powf((s1/sigmaPar),2)), 2);
                else s1 = 0.0f;
            }
            else if (penaltytype == 4) {
                /* Threshold-constrained linear diffusion
                   This means that the linear diffusion will be performed on pixels with
                   absolute difference less than the threshold.
                 */
                if (fabs(e1) > sigmaPar) e1 = 0.0f;
                if (fabs(w1) > sigmaPar) w1 = 0.0f;
                if (fabs(n1) > sigmaPar) n1 = 0.0f;
                if (fabs(s1) > sigmaPar) s1 = 0.0f;
            }
            else if (penaltytype == 5) {
                /*
                   Threshold constrained Huber diffusion: Huber inside 2*sigma,
                   hard stop (zero flux) beyond it.
                 */
                if (fabs(e1) <= 2.0f*sigmaPar) {
                    if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
                    else e1 = e1/sigmaPar; }
                else e1 = 0.0f;
                if (fabs(w1) <= 2.0f*sigmaPar) {
                    if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
                    else w1 = w1/sigmaPar; }
                else w1 = 0.0f;
                if (fabs(n1) <= 2.0f*sigmaPar) {
                    if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
                    else n1 = n1/sigmaPar; }
                else n1 = 0.0f;
                if (fabs(s1) <= 2.0f*sigmaPar) {
                    if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
                    else s1 = s1/sigmaPar; }
                else s1 = 0.0f;
            }
            else {
                printf("%s \n", "No penalty function selected! Use 1,2,3,4 or 5.");
                break;
            }
            /* explicit update: weighted diffusion term plus fidelity to Input */
            Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1) - (Output[index] - Input[index]));
        }}
    return *Output;
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/* linear diffusion (heat equation) */
/* One explicit linear-diffusion (heat equation) step over a 3D volume.
 * Output is updated in place with symmetric (Neumann mirror) boundary
 * conditions; Input supplies the data-fidelity term.
 * Returns the first element of Output (kept for API compatibility). */
float LinearDiff3D(float *Input, float *Output, float lambdaPar, float tau, long dimX, long dimY, long dimZ)
{
#pragma omp parallel for shared(Input)
    for (long slc = 0; slc < dimZ; slc++) {
        /* mirrored slice neighbours at the front/back faces */
        long sFwd  = (slc + 1 == dimZ) ? slc - 1 : slc + 1;
        long sBack = (slc == 0)        ? slc + 1 : slc - 1;
        for (long row = 0; row < dimY; row++) {
            /* mirrored row neighbours at the top/bottom edges */
            long rDown = (row + 1 == dimY) ? row - 1 : row + 1;
            long rUp   = (row == 0)        ? row + 1 : row - 1;
            for (long col = 0; col < dimX; col++) {
                /* mirrored column neighbours at the left/right edges */
                long cRight = (col + 1 == dimX) ? col - 1 : col + 1;
                long cLeft  = (col == 0)        ? col + 1 : col - 1;
                long idx = (dimX * dimY) * slc + row * dimX + col;
                float east  = Output[(dimX * dimY) * slc + row * dimX + cRight];
                float west  = Output[(dimX * dimY) * slc + row * dimX + cLeft];
                float north = Output[(dimX * dimY) * slc + rDown * dimX + col];
                float south = Output[(dimX * dimY) * slc + rUp * dimX + col];
                float up    = Output[(dimX * dimY) * sFwd + row * dimX + col];
                float down  = Output[(dimX * dimY) * sBack + row * dimX + col];
                float de = east  - Output[idx];
                float dw = west  - Output[idx];
                float dn = north - Output[idx];
                float ds = south - Output[idx];
                float du = up    - Output[idx];
                float dd = down  - Output[idx];
                /* explicit update: 6-neighbour diffusion plus fidelity to Input */
                Output[idx] += tau * (lambdaPar * (de + dw + dn + ds + du + dd) - (Output[idx] - Input[idx]));
            }
        }
    }
    return *Output;
}
/* One explicit nonlinear-diffusion step over a 3D volume (in place).
 * penaltytype selects the edge-stopping function applied to each of the six
 * neighbour differences: 1 Huber, 2 Perona-Malik, 3 Tukey biweight,
 * 4 threshold-constrained linear, 5 threshold-constrained Huber.
 * Symmetric (Neumann) boundaries. Returns the first element of Output. */
float NonLinearDiff3D(float *Input, float *Output, float lambdaPar, float sigmaPar, float tau, int penaltytype, long dimX, long dimY, long dimZ)
{
    long i,j,k,i1,i2,j1,j2,k1,k2,index;
    float e,w,n,s,u,d,e1,w1,n1,s1,u1,d1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1,k,k1,k2,u1,d1,u,d)
    for(k=0; k<dimZ; k++) {
        /* symmetric boundary conditions (Neuman) along z */
        k1 = k+1; if (k1 == dimZ) k1 = k-1;
        k2 = k-1; if (k2 < 0) k2 = k+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions (Neuman) */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            for(i=0; i<dimX; i++) {
                /* symmetric boundary conditions (Neuman) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                i2 = i-1; if (i2 < 0) i2 = i+1;
                index = (dimX*dimY)*k + j*dimX+i;
                /* six neighbour values and their differences to the centre voxel */
                e = Output[(dimX*dimY)*k + j*dimX+i1];
                w = Output[(dimX*dimY)*k + j*dimX+i2];
                n = Output[(dimX*dimY)*k + j1*dimX+i];
                s = Output[(dimX*dimY)*k + j2*dimX+i];
                u = Output[(dimX*dimY)*k1 + j*dimX+i];
                d = Output[(dimX*dimY)*k2 + j*dimX+i];
                e1 = e - Output[index];
                w1 = w - Output[index];
                n1 = n - Output[index];
                s1 = s - Output[index];
                u1 = u - Output[index];
                d1 = d - Output[index];
                /* NOTE(review): fabs (double) is used on float operands;
                   fabsf would avoid the float->double round-trips. */
                if (penaltytype == 1){
                    /* Huber penalty */
                    if (fabs(e1) > sigmaPar) e1 =  signNDFc(e1);
                    else e1 = e1/sigmaPar;
                    if (fabs(w1) > sigmaPar) w1 =  signNDFc(w1);
                    else w1 = w1/sigmaPar;
                    if (fabs(n1) > sigmaPar) n1 =  signNDFc(n1);
                    else n1 = n1/sigmaPar;
                    if (fabs(s1) > sigmaPar) s1 =  signNDFc(s1);
                    else s1 = s1/sigmaPar;
                    if (fabs(u1) > sigmaPar) u1 =  signNDFc(u1);
                    else u1 = u1/sigmaPar;
                    if (fabs(d1) > sigmaPar) d1 =  signNDFc(d1);
                    else d1 = d1/sigmaPar;
                }
                else if (penaltytype == 2) {
                    /* Perona-Malik */
                    e1 = (e1)/(1.0f + powf((e1/sigmaPar),2));
                    w1 = (w1)/(1.0f + powf((w1/sigmaPar),2));
                    n1 = (n1)/(1.0f + powf((n1/sigmaPar),2));
                    s1 = (s1)/(1.0f + powf((s1/sigmaPar),2));
                    u1 = (u1)/(1.0f + powf((u1/sigmaPar),2));
                    d1 = (d1)/(1.0f + powf((d1/sigmaPar),2));
                }
                else if (penaltytype == 3) {
                    /* Tukey Biweight */
                    if (fabs(e1) <= sigmaPar) e1 = e1*powf((1.0f - powf((e1/sigmaPar),2)), 2);
                    else e1 = 0.0f;
                    if (fabs(w1) <= sigmaPar) w1 = w1*powf((1.0f - powf((w1/sigmaPar),2)), 2);
                    else w1 = 0.0f;
                    if (fabs(n1) <= sigmaPar) n1 = n1*powf((1.0f - powf((n1/sigmaPar),2)), 2);
                    else n1 = 0.0f;
                    if (fabs(s1) <= sigmaPar) s1 = s1*powf((1.0f - powf((s1/sigmaPar),2)), 2);
                    else s1 = 0.0f;
                    if (fabs(u1) <= sigmaPar) u1 = u1*powf((1.0f - powf((u1/sigmaPar),2)), 2);
                    else u1 = 0.0f;
                    if (fabs(d1) <= sigmaPar) d1 = d1*powf((1.0f - powf((d1/sigmaPar),2)), 2);
                    else d1 = 0.0f;
                }
                else if (penaltytype == 4) {
                    /* Threshold-constrained linear diffusion
                       This means that the linear diffusion will be performed on pixels with
                       absolute difference less than the threshold.
                     */
                    if (fabs(e1) > sigmaPar) e1 = 0.0f;
                    if (fabs(w1) > sigmaPar) w1 = 0.0f;
                    if (fabs(n1) > sigmaPar) n1 = 0.0f;
                    if (fabs(s1) > sigmaPar) s1 = 0.0f;
                    if (fabs(u1) > sigmaPar) u1 = 0.0f;
                    if (fabs(d1) > sigmaPar) d1 = 0.0f;
                }
                else if (penaltytype == 5) {
                    /*
                       Threshold constrained Huber diffusion: Huber inside 2*sigma,
                       hard stop (zero flux) beyond it.
                     */
                    if (fabs(e1) <= 2.0f*sigmaPar) {
                        if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
                        else e1 = e1/sigmaPar; }
                    else e1 = 0.0f;
                    if (fabs(w1) <= 2.0f*sigmaPar) {
                        if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
                        else w1 = w1/sigmaPar; }
                    else w1 = 0.0f;
                    if (fabs(n1) <= 2.0f*sigmaPar) {
                        if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
                        else n1 = n1/sigmaPar; }
                    else n1 = 0.0f;
                    if (fabs(s1) <= 2.0f*sigmaPar) {
                        if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
                        else s1 = s1/sigmaPar; }
                    else s1 = 0.0f;
                    if (fabs(u1) <= 2.0f*sigmaPar) {
                        if (fabs(u1) > sigmaPar) u1 = signNDFc(u1);
                        else u1 = u1/sigmaPar; }
                    else u1 = 0.0f;
                    if (fabs(d1) <= 2.0f*sigmaPar) {
                        if (fabs(d1) > sigmaPar) d1 = signNDFc(d1);
                        else d1 = d1/sigmaPar; }
                    else d1 = 0.0f;
                }
                else {
                    printf("%s \n", "No penalty function selected! Use 1,2,3,4 or 5.");
                    break;
                }
                /* explicit update: weighted 6-neighbour diffusion plus fidelity to Input */
                Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1 + u1 + d1) - (Output[index] - Input[index]));
            }}}
    return *Output;
}
|
GB_unaryop__ainv_int32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_uint32
// op(A') function: GB_tran__ainv_int32_uint32
// C type: int32_t
// A type: uint32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) (-Ax [p]) for p = 0..anz-1, parallelized over nthreads
// (see GB_CAST_OP / GB_OP defined above in this generated file).
GrB_Info GB_unop__ainv_int32_uint32
(
    int32_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* flags; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template (GB_unaryop_transpose.c)
// is instantiated with the macros above to apply -aij during the transpose.
GrB_Info GB_tran__ainv_int32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* flags; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
diagmm_x_csr_n_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "memory.h"
#include "alphasparse/opt.h"
/* y := beta .* y + alpha * diag(mat) * x for a dense multi-vector x with
 * `columns` columns; ldx/ldy are the leading dimensions of x and y.
 * NOTE(review): index2(cc, cr, ld) is called column-index-first — presumably
 * column-major layout; confirm against the index2 definition. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();
    /* gather the main diagonal of the CSR matrix; rows without a stored
       diagonal entry contribute zero */
    ALPHA_Number *diag = alpha_malloc(mat->rows * sizeof(ALPHA_Number));
    /* NOTE(review): alpha_malloc result is used unchecked — confirm it aborts
       on failure rather than returning NULL. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT ar = 0; ar < mat->rows; ++ar)
    {
        alpha_setzero(diag[ar]);
        for (ALPHA_INT ai = mat->rows_start[ar]; ai < mat->rows_end[ar]; ++ai)
            if (mat->col_indx[ai] == ar)
            {
                diag[ar] = mat->values[ai];
            }
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT cc = 0; cc < columns; ++cc)
        for (ALPHA_INT cr = 0; cr < mat->rows; ++cr)
        {
            ALPHA_Number val;
            alpha_mule(y[index2(cc, cr, ldy)], beta);  /* y *= beta */
            alpha_mul(val, alpha, diag[cr]);           /* val = alpha * diag */
            alpha_madde(y[index2(cc, cr, ldy)], val, x[index2(cc, cr, ldx)]);  /* y += val * x */
        }
    alpha_free(diag);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
ch_ompss.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include "ch_common.h"
#include "../timing.h"
void cholesky_mpi(const int ts, const int nt, double *A[nt][nt], double *B, double *C[nt], int *block_rank)
{
INIT_TIMING(omp_get_max_threads());
#pragma omp parallel
{
#pragma omp single
{
START_TIMING(TIME_TOTAL);
{
START_TIMING(TIME_CREATE);
for (int k = 0; k < nt; k++) {
if (block_rank[k*nt+k] == mype) {
#pragma omp task depend(out: A[k][k]) firstprivate(k)
{
//printf("Computing potrf in k=%d\n", k);
START_TIMING(TIME_POTRF);
omp_potrf(A[k][k], ts, ts);
END_TIMING(TIME_POTRF);
}
}
int comm_sentinel; // <-- sentinel, never actual referenced
if (block_rank[k*nt+k] == mype && np != 1) {
// use comm_sentinel to make sure this task runs before the communication tasks below
#pragma omp task depend(in: A[k][k], comm_sentinel) firstprivate(k)
{
//printf("Communicating potrf in k=%d\n", k);
START_TIMING(TIME_COMM);
MPI_Request *reqs = NULL;
int nreqs = 0;
char send_flags[np];
reset_send_flags(send_flags);
for (int kk = k+1; kk < nt; kk++) {
if (!send_flags[block_rank[k*nt+kk]]) {
++nreqs;
send_flags[block_rank[k*nt+kk]] = 1;
}
}
reqs = malloc(sizeof(MPI_Request)*nreqs);
nreqs = 0;
for (int dst = 0; dst < np; dst++) {
if (send_flags[dst] && dst != mype) {
MPI_Request send_req;
//printf("Sending potrf block to %d in k=%d\n", dst, k);
MPI_Isend(A[k][k], ts*ts, MPI_DOUBLE, dst, k*nt+k, MPI_COMM_WORLD, &send_req);
reqs[nreqs++] = send_req;
}
}
//printf("Waiting for potrf block in k=%d\n", k);
waitall(reqs, nreqs);
free(reqs);
END_TIMING(TIME_COMM);
}
} else if (block_rank[k*nt+k] != mype) {
// use comm_sentinel to make sure this task runs before the communication tasks below
#pragma omp task depend(out: B) depend(in:comm_sentinel) firstprivate(k)
{
START_TIMING(TIME_COMM);
int recv_flag = 0;
for (int i = k + 1; i < nt; i++) {
if (block_rank[k*nt+i] == mype) {
recv_flag = 1;
break;
}
}
if (recv_flag) {
MPI_Request recv_req;
MPI_Irecv(B, ts*ts, MPI_DOUBLE, block_rank[k*nt+k], k*nt+k, MPI_COMM_WORLD, &recv_req);
//printf("Receiving potrf block from %d in k=%d\n", block_rank[k*nt+k], k);
waitall(&recv_req, 1);
}
END_TIMING(TIME_COMM);
}
}
for (int i = k + 1; i < nt; i++) {
if (block_rank[k*nt+i] == mype) {
if (block_rank[k*nt+k] == mype) {
#pragma omp task depend(in: A[k][k], comm_sentinel) depend(out: A[k][i]) firstprivate(k, i)
{
START_TIMING(TIME_TRSM);
omp_trsm(A[k][k], A[k][i], ts, ts);
END_TIMING(TIME_TRSM);
}
} else {
#pragma omp task depend(in: B, comm_sentinel) depend(out: A[k][i]) firstprivate(k, i)
{
START_TIMING(TIME_TRSM);
omp_trsm(B, A[k][i], ts, ts);
END_TIMING(TIME_TRSM);
}
}
}
}
#pragma omp task depend(inout: comm_sentinel) firstprivate(k) shared(A)
{
START_TIMING(TIME_COMM);
char send_flags[np];
reset_send_flags(send_flags);
int nreqs = 0;
// upper bound in case all our blocks have to be sent
int max_req = (nt-k)*(np-1);
MPI_Request *reqs = malloc(sizeof(*reqs)*max_req);
for (int i = k + 1; i < nt; i++) {
if (block_rank[k*nt+i] == mype && np != 1) {
for (int ii = k + 1; ii < i; ii++) {
if (!send_flags[block_rank[ii*nt+i]]) {
send_flags[block_rank[ii*nt+i]] = 1;
}
}
for (int ii = i + 1; ii < nt; ii++) {
if (!send_flags[block_rank[i*nt+ii]]) {
send_flags[block_rank[i*nt+ii]] = 1;
}
}
if (!send_flags[block_rank[i*nt+i]]) send_flags[block_rank[i*nt+i]] = 1;
for (int dst = 0; dst < np; dst++) {
if (send_flags[dst] && dst != mype) {
MPI_Request send_req;
MPI_Isend(A[k][i], ts*ts, MPI_DOUBLE, dst, k*nt+i, MPI_COMM_WORLD, &send_req);
reqs[nreqs++] = send_req;
}
}
reset_send_flags(send_flags);
}
if (block_rank[k*nt+i] != mype) {
int recv_flag = 0;
for (int ii = k + 1; ii < i; ii++) {
if (block_rank[ii*nt+i] == mype) recv_flag = 1;
}
for (int ii = i + 1; ii < nt; ii++) {
if (block_rank[i*nt+ii] == mype) recv_flag = 1;
}
if (block_rank[i*nt+i] == mype) recv_flag = 1;
if (recv_flag) {
MPI_Request recv_req;
MPI_Irecv(C[i], ts*ts, MPI_DOUBLE, block_rank[k*nt+i], k*nt+i, MPI_COMM_WORLD, &recv_req);
reqs[nreqs++] = recv_req;
}
}
}
//printf("Waiting for trsm blocks in k=%d\n", k);
waitall(reqs, nreqs);
free(reqs);
END_TIMING(TIME_COMM);
}
for (int i = k + 1; i < nt; i++) {
for (int j = k + 1; j < i; j++) {
if (block_rank[j*nt+i] == mype) {
if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] == mype) {
#pragma omp task depend(in: A[k][i], A[k][j]) depend(out: A[j][i]) firstprivate(k, j, i)
{
START_TIMING(TIME_GEMM);
omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
}
} else if (block_rank[k*nt+i] != mype && block_rank[k*nt+j] == mype) {
#pragma omp task depend(in: A[k][j], comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
{
START_TIMING(TIME_GEMM);
omp_gemm(C[i], A[k][j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
}
} else if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] != mype) {
#pragma omp task depend(in: A[k][i], comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
{
START_TIMING(TIME_GEMM);
omp_gemm(A[k][i], C[j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
}
} else {
#pragma omp task depend(in: comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
{
START_TIMING(TIME_GEMM);
omp_gemm(C[i], C[j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
}
}
}
}
if (block_rank[i*nt+i] == mype) {
if (block_rank[k*nt+i] == mype) {
#pragma omp task depend(in: A[k][i]) depend(out: A[i][i]) firstprivate(k, i)
{
START_TIMING(TIME_SYRK);
omp_syrk(A[k][i], A[i][i], ts, ts);
END_TIMING(TIME_SYRK);
}
} else {
#pragma omp task depend(in: comm_sentinel) depend(out: A[i][i]) firstprivate(k, i)
{
START_TIMING(TIME_SYRK);
omp_syrk(C[i], A[i][i], ts, ts);
END_TIMING(TIME_SYRK);
}
}
}
}
}
END_TIMING(TIME_CREATE);
}
#pragma omp taskwait
END_TIMING(TIME_TOTAL);
MPI_Barrier(MPI_COMM_WORLD);
}// pragma omp single
}// pragma omp parallel
PRINT_TIMINGS();
FREE_TIMING();
}
|
GB_unaryop__lnot_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_bool
// op(A') function: GB_tran__lnot_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !Ax [p] for all p in [0, anz): apply the boolean LNOT operator
// elementwise.  Statically scheduled across nthreads OpenMP threads; the
// per-entry body is instantiated from the GB_CAST_OP macro defined above.
// (Auto-generated kernel: edits belong in the Generator, not here.)
GrB_Info GB_unop__lnot_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = !(A'): transpose A, typecast, and apply the boolean LNOT operator.
// The numeric phase (phase 2 of 2) is instantiated from the shared
// transpose template using the macros defined at the top of this file.
GrB_Info GB_tran__lnot_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_transform_kernel_pack4to1_neon(const Mat& weight_data, Mat& weight_data_pack4to1, int num_input, int num_output, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// src = kw-kh-inch-outch
// dst = 4a-kw-kh-inch/4a-outch
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
weight_data_pack4to1.create(maxk, num_input / 4, num_output, (size_t)4 * 4, 4);
for (int q = 0; q < num_output; q++)
{
const Mat k0 = weight_data_r2.channel(q);
Mat g0 = weight_data_pack4to1.channel(q);
for (int p = 0; p + 3 < num_input; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
float* g00 = g0.row(p / 4);
for (int k = 0; k < maxk; k++)
{
g00[0] = k00[k];
g00[1] = k01[k];
g00[2] = k02[k];
g00[3] = k03[k];
g00 += 4;
}
}
}
}
// Direct convolution: 4-packed input blob -> unpacked (pack1) output blob.
// Each output element is a dot product over (channels * maxk * 4) weights,
// computed 4 lanes at a time with NEON and reduced to a scalar sum.
// Assumes top_blob is pre-sized and weights were laid out by
// convolution_transform_kernel_pack4to1_neon.
static void convolution_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets: element offset (in packed-4 units) of each kernel tap
// relative to the window origin, accounting for dilation
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
const float* bias_data_ptr = bias_data;
// num_output: one thread per output channel
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// start from the bias (if any) for this output channel
float sum = 0.f;
if (bias_data_ptr)
{
sum = bias_data_ptr[p];
}
// weights for output channel p; 4 floats per tap per input group
const float* kptr = (const float*)weight_data_pack4to1 + maxk * channels * p * 4;
// channels: accumulate over every packed input channel group
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
for (int k = 0; k < maxk; k++) // 29.23
{
float32x4_t _val = vld1q_f32(sptr + space_ofs[k] * 4);
float32x4_t _w = vld1q_f32(kptr);
float32x4_t _s4 = vmulq_f32(_val, _w);
#if __aarch64__
sum += vaddvq_f32(_s4); // dot
#else
// armv7 has no across-lane add: pairwise-reduce the 4 lanes
float32x2_t _ss = vadd_f32(vget_low_f32(_s4), vget_high_f32(_s4));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#endif
kptr += 4;
}
}
// optional fused activation (relu/clip/sigmoid/... per activation_type)
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
outptr += outw;
}
}
}
|
GB_binop__minus_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int16)
// A*D function (colscale): GB (_AxD__minus_int16)
// D*A function (rowscale): GB (_DxB__minus_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int16)
// C=scalar+B GB (_bind1st__minus_int16)
// C=scalar+B' GB (_bind1st_tran__minus_int16)
// C=A+scalar GB (_bind2nd__minus_int16)
// C=A'+scalar GB (_bind2nd_tran__minus_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The loop body is instantiated
// from the shared template using the GB_* macros defined above.
// (Auto-generated file: edits belong in the Generator, not here.)
void GB (_Cdense_ewise3_accum__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Same structure as above, but C is overwritten rather than accumulated into.
void GB (_Cdense_ewise3_noaccum__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__minus_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable (the block above always returns); harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template writes the scaled values directly into C's value array
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd (or eWiseUnion when is_eWiseUnion is true): set-union of the
// patterns of A and B; alpha/beta scalars substitute for entries missing
// from A or B respectively in the eWiseUnion case.
GrB_Info GB (_AaddB__minus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
// unpack the typed scalars only when the union variant needs them
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__minus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult variant 02: A sparse/hyper, B bitmap/full.  The flipxy flag only
// matters for operators with GB_BINOP_FLIP (MINUS has a flipped counterpart,
// so the non-flip path below is used).
GrB_Info GB (_AemultB_02__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__minus_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p] for every entry present in B's bitmap (bind the scalar
// as the first operand of MINUS).
GrB_Info GB (_bind1st__minus_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in B's bitmap (GBB is true for full matrices)
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y for every entry present in A's bitmap (bind the scalar
// as the second operand of MINUS).
GrB_Info GB (_bind2nd__minus_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = x - aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = x - A' : transpose A and subtract each entry from the bound scalar x.
GrB_Info GB (_bind1st_tran__minus_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generator artifact)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = aij - y.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = A' - y : transpose A and subtract the bound scalar y from each entry.
GrB_Info GB (_bind2nd_tran__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
struct-enter-exit-data-1.c | /* Check 'GOMP_MAP_STRUCT' mapping, and in particular that it gets removed from
OpenACC 'exit data' directives. */
/* { dg-additional-options "-fdump-tree-gimple" } */
struct str {
int a;
int *b;
int *c;
int d;
int *e;
int f;
};
#define N 1024
/* Map scalar members (s.a, s.f) and the arrays behind pointer members
   (s.b, s.c) to/from the device.  The dg-final patterns below pin the exact
   gimple: 'enter data' must carry a 'GOMP_MAP_STRUCT' entry plus
   attach operations, while 'exit data' must not carry the struct mapping
   and uses detach instead.  (Do not alter the pragmas or scan patterns.)  */
void
test (int *b, int *c, int *e)
{
struct str s = { .a = 0, .b = b, .c = c, .d = 0, .e = e, .f = 0 };
#pragma acc enter data copyin(s.a, s.b[0:N], s.c[0:N] /* , s.d */ /* , s.e[0:N] */, s.f)
/* { dg-final { scan-tree-dump {(?n)#pragma omp target oacc_enter_exit_data map\(struct:s \[len: 4\]\) map\(to:s.a \[len: [0-9]+\]\) map\(alloc:s.b \[len: [0-9]+\]\) map\(alloc:s.c \[len: [0-9]+\]\) map\(to:s.f \[len: [0-9]+\]\) map\(to:\*[_0-9]+ \[len: [0-9]+\]\) map\(attach:s.b \[bias: 0\]\) map\(to:\*[_0-9]+ \[len: [0-9]+\]\) map\(attach:s.c \[bias: 0\]\)$} gimple } } */
#pragma acc exit data copyout(s.a, s.b[0:N], s.c[0:N] /* , s.d */ /* , s.e[0:N] */, s.f)
/* { dg-final { scan-tree-dump {(?n)#pragma omp target oacc_enter_exit_data map\(from:s.a \[len: [0-9]+\]\) map\(release:s.b \[len: [0-9]+\]\) map\(release:s.c \[len: [0-9]+\]\) map\(from:s.f \[len: [0-9]+\]\) map\(from:\*[_0-9]+ \[len: [0-9]+\]\) map\(detach:s.b \[bias: 0\]\) map\(from:\*[_0-9]+ \[len: [0-9]+\]\) map\(detach:s.c \[bias: 0\]\)$} gimple } } */
}
|
util.c | #include "util.h"
#include <stdio.h>
// Set every element of v[0..c-1] to val, parallelized with OpenMP.
// NOTE(review): 'inline' without 'static' in a .c file emits no external
// definition under C99/C11 inline semantics unless util.h (included above)
// also declares the function without 'inline' -- verify, otherwise callers
// in other translation units can fail to link.
inline void set_vector_double(double *v, int c, double val)
{
#pragma omp parallel for
for (int i = 0; i < c; i++)
v[i] = val;
}
// Print the first MIN(PRINT_MAX_ROWS, num_rows) entries of y on one line.
// NOTE(review): same non-static 'inline' linkage caveat as set_vector_double.
inline void print_vector(double *y, const matrix_info_t mi)
{
// Write the results to standard output.
for (int i = 0; i < MIN(PRINT_MAX_ROWS,mi.num_rows); i++)
fprintf(stdout, "%6g ", y[i]);
fprintf(stdout, "\n");
}
// Report the total runtime for a matrix format, plus the average time per
// iteration, on a single aligned stdout line.
void log_execution(const char *matrix_format, int iterations, float time)
{
    const float per_iteration = time / (float)iterations;
    printf("%-34s %.6f seconds, %.6f sec/it\n", matrix_format, time, per_iteration);
}
// Print a one-line summary of the matrix shape, sparsity, and benchmark
// iteration count before the timing runs start.
void print_header(matrix_info_t mi, int iterations)
{
printf("%dx%d matrix, %d nonzero elements, %d max nonzeroes per row, %d iterations\n",
mi.num_rows, mi.num_columns, mi.num_nonzeros, mi.max_nonzeros_per_row, iterations);
}
// Elapsed wall-clock time from t0 to t1 in fractional seconds.
// The sum is formed in double precision and narrowed to float on return,
// exactly as before.
float time_spent(struct timespec t0, struct timespec t1)
{
    const double whole = (double)(t1.tv_sec - t0.tv_sec);
    const double fraction = (double)(t1.tv_nsec - t0.tv_nsec) * 1e-9;
    return whole + fraction;
}
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif //ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#endif
//Precision to use for calculations
#define fptype float
#define NUM_RUNS 100
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
OptionData *data;
fptype *prices;
int numOptions;
int * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numErrors = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
// Cumulative standard-normal distribution function N(x).
// Uses a 5th-order polynomial approximation in t = 1/(1 + 0.2316419*|x|)
// (coefficients match Abramowitz & Stegun eq. 26.2.17), exploiting the
// symmetry N(-x) = 1 - N(x) for negative inputs.  All intermediates are
// stored in 'fptype' (float), so the exact sequence of assignments matters
// for rounding -- do not reassociate.
fptype CNDF ( fptype InputX )
{
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
// work with |x|; undo via symmetry at the end
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
// (the standard normal density phi(|x|) = exp(-x^2/2)/sqrt(2*pi))
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
// t = 1 / (1 + 0.2316419*|x|) and its powers t^2..t^5
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
// polynomial b1*t + b2*t^2 + b3*t^3 + b4*t^4 + b5*t^5
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
// N(|x|) = 1 - phi(|x|) * polynomial
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
// N(-x) = 1 - N(x)
OutputX = 1.0 - OutputX;
}
return OutputX;
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
// Closed-form Black-Scholes price of a European option on a non-dividend-
// paying stock.  otype == 0 selects the call formula
//   C = S*N(d1) - K*exp(-r*T)*N(d2)
// otherwise put-call parity gives the put price.  'timet' is unused here.
// Intermediates are 'fptype' (float); keep the assignment sequence intact
// to preserve rounding behavior.
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
fptype strike, fptype rate, fptype volatility,
fptype time, int otype, float timet )
{
fptype OptionPrice;
// local private working variables for the calculation
fptype xStockPrice;
fptype xStrikePrice;
fptype xRiskFreeRate;
fptype xVolatility;
fptype xTime;
fptype xSqrtTime;
fptype logValues;
fptype xLogTerm;
fptype xD1;
fptype xD2;
fptype xPowerTerm;
fptype xDen;
fptype d1;
fptype d2;
fptype FutureValueX;
fptype NofXd1;
fptype NofXd2;
fptype NegNofXd1;
fptype NegNofXd2;
xStockPrice = sptprice;
xStrikePrice = strike;
xRiskFreeRate = rate;
xVolatility = volatility;
xTime = time;
xSqrtTime = sqrt(xTime);
logValues = log( sptprice / strike );
xLogTerm = logValues;
// d1 = (ln(S/K) + (r + v^2/2)*T) / (v*sqrt(T)), d2 = d1 - v*sqrt(T)
xPowerTerm = xVolatility * xVolatility;
xPowerTerm = xPowerTerm * 0.5;
xD1 = xRiskFreeRate + xPowerTerm;
xD1 = xD1 * xTime;
xD1 = xD1 + xLogTerm;
xDen = xVolatility * xSqrtTime;
xD1 = xD1 / xDen;
xD2 = xD1 - xDen;
d1 = xD1;
d2 = xD2;
NofXd1 = CNDF( d1 );
NofXd2 = CNDF( d2 );
// discounted strike K*exp(-r*T)
FutureValueX = strike * ( exp( -(rate)*(time) ) );
if (otype == 0) {
// call
OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
} else {
// put, via N(-d) = 1 - N(d)
NegNofXd1 = (1.0 - NofXd1);
NegNofXd2 = (1.0 - NofXd2);
OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
}
return OptionPrice;
}
#ifdef ENABLE_TBB
// TBB work functor: prices every option in the given index range with the
// Black-Scholes closed-form solution, writing into the global 'prices'
// array.  Under ERR_CHK each price is compared against the DerivaGem
// reference value.
struct mainWork {
    mainWork() {}
    mainWork(mainWork &w, tbb::split) {}

    void operator()(const tbb::blocked_range<int> &range) const {
        fptype price;
        int begin = range.begin();
        int end = range.end();
        for (int i=begin; i!=end; i++) {
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation.
             */
            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            fptype priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-5 ){
                fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                        i, price, data[i].DGrefval, priceDelta);
                // fix: the global error counter is 'numErrors'; 'numError'
                // was undeclared and broke the ERR_CHK build
                numErrors ++;
            }
#endif
        }
    }
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
// TBB driver: runs the full pricing pass NUM_RUNS times (benchmark loop);
// parallel_for partitions the option range across worker threads, and the
// affinity_partitioner reuses the thread/data mapping between runs.
int bs_thread(void *tid_ptr) {
int j;
tbb::affinity_partitioner a;
mainWork doall;
for (j=0; j<NUM_RUNS; j++) {
tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
}
return 0;
}
#else // !ENABLE_TBB
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
// Thread worker: prices a contiguous slice of the option array (or, under
// OpenMP, the whole array split by the parallel-for), repeating the full
// computation NUM_RUNS times as the benchmark loop.
int bs_thread(void *tid_ptr) {
#endif
    int i, j;
    fptype price;
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

    for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
        for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
        for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation.
             */
            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-4 ){
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                // fix: the global error counter is 'numErrors'; 'numError'
                // was undeclared and broke the ERR_CHK build
                numErrors ++;
            }
#endif
        }
    }
    return 0;
}
#endif //ENABLE_TBB
// Driver for the Black-Scholes benchmark: reads the option portfolio from
// <inputFile>, prices every option NUM_RUNS times with the configured
// parallel backend (pthreads / OpenMP / TBB / serial), and writes one price
// per line to <outputFile>.  Returns 0 on success; exits(1) on any error.
int main (int argc, char **argv)
{
FILE *file;
int i;
int loopnum;
fptype * buffer;
int * buffer2;
int rv;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
if (argc != 4)
{
printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
exit(1);
}
nThreads = atoi(argv[1]);
char *inputFile = argv[2];
char *outputFile = argv[3];
//Read input data from file
file = fopen(inputFile, "r");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", inputFile);
exit(1);
}
rv = fscanf(file, "%i", &numOptions);
if(rv != 1) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
// Cap the thread count so every thread has at least one option to price.
if(nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if(nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
exit(1);
}
#endif
// alloc spaces for the option data (fix: check allocations before use)
data = (OptionData*)malloc(numOptions*sizeof(OptionData));
prices = (fptype*)malloc(numOptions*sizeof(fptype));
if(data == NULL || prices == NULL) {
printf("ERROR: Unable to allocate memory.\n");
exit(1);
}
for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
{
rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval);
if(rv != 9) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", inputFile);
exit(1);
}
#ifdef ENABLE_THREADS
MAIN_INITENV(,8000000,nThreads);
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
// Pack the five input arrays into one padded allocation and round the base
// up/down so the aliased arrays start on a cache-line boundary.
buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD);
if(buffer == NULL || buffer2 == NULL) {
printf("ERROR: Unable to allocate memory.\n");
exit(1);
}
sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
for (i=0; i<numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
// The expression has type size_t; %zu replaces the old %d, which is
// undefined behavior for a size_t argument on LP64 targets.
printf("Size of data: %zu\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
HANDLE *threads;
int *nums;
threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE));
nums = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
tids[i]=i;
CREATE_WITH_ARG(bs_thread, &tids[i]);
}
WAIT_FOR_END(nThreads);
free(tids);
#endif //WIN32
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid=0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else //ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid=0;
bs_thread(&tid);
#else //ENABLE_TBB
//serial version
int tid=0;
bs_thread(&tid);
#endif //ENABLE_TBB
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
//Write prices to output file
file = fopen(outputFile, "w");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", outputFile);
exit(1);
}
rv = fprintf(file, "%i\n", numOptions);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
for(i=0; i<numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", outputFile);
exit(1);
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
// Release the packed array storage via the original malloc pointers
// (sptprice/otype are aligned aliases into buffer/buffer2 and must not be
// freed themselves).  These were previously leaked.
free(buffer);
free(buffer2);
free(data);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
GB_unop__identity_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_bool
// op(A') function: GB_unop_tran__identity_uint16_bool
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = (uint16_t) Ax [p] for all p, parallelized with
// a static OpenMP schedule over nthreads threads.
GrB_Info GB_unop_apply__identity_uint16_bool
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out; caller falls back to the generic implementation
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cast bool -> uint16_t and store (the identity unary op)
        Cx [p] = (uint16_t) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply: C = identity (cast (A')).  The loop body lives in the
// shared template GB_unop_transpose.c, instantiated through the GB_* macros
// defined at the top of this generated file (phase 2 of 2).
GrB_Info GB_unop_tran__identity_uint16_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// kernel compiled out (see GB_DISABLE); caller uses the generic case
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | /* $Id: main.c,v 1.4 2004/04/21 04:23:43 pohlt Exp $ */
/*############################################################################*/
#include "main.h"
#include "lbm.h"
#include <stdio.h>
#include <stdlib.h>
#if defined(SPEC)
# include <time.h>
#else
# include <sys/times.h>
# include <unistd.h>
#endif
#include <sys/stat.h>
/*############################################################################*/
static LBM_GridPtr srcGrid, dstGrid;
size_t gridSize;
size_t marginSize;
double * src;
double * dst;
/*############################################################################*/
/* Simulation driver: parse arguments, allocate and initialize both grids,
   run nTimeSteps stream-collide iterations (with in/out-flow handling for
   channel flow), then store or compare the final velocity field.
   Fix: the four '&param' arguments below had been corrupted into the
   mis-encoded character sequence '¶m' (HTML-entity damage), which does
   not compile; the address-of expressions are restored.  */
int main( int nArgs, char* arg[] ) {
	MAIN_Param param;
	int t;
	MAIN_parseCommandLine( nArgs, arg, &param );
	MAIN_printInfo( &param );
	MAIN_initialize( &param );
	/* Keep both grids resident on the target device for the whole run;
	   src is copied back only for the periodic statistics dump below. */
#pragma omp target data map(tofrom:src[0:gridSize]), map(to:dst[0:gridSize])
	{
	for( t = 1; t <= param.nTimeSteps; t++ ) {
		if( param.simType == CHANNEL ) {
			LBM_handleInOutFlow( *srcGrid );
		}
		LBM_performStreamCollide( *srcGrid, *dstGrid );
		LBM_swapGrids( &srcGrid, &dstGrid );
		if( (t & 63) == 0 ) {   /* every 64th timestep */
#pragma omp target update from(src[0:gridSize])
			printf( "timestep: %i\n", t );
			LBM_showGridStatistics( *srcGrid );
		}
	}
	}
	MAIN_finalize( &param );
	return 0;
}
/*############################################################################*/
/* Parse and validate the command line into *param.
   Layout: [SPEC --device/--platform pairs] <time steps> <result file>
   <action: 0 nil, 1 cmp, 2 str> <sim type: 0 ldc, 1 channel> [<obstacle>].
   Exits(1) on malformed usage, on a missing or mis-sized obstacle file, and
   on a missing result file when the action is COMPARE.  */
void MAIN_parseCommandLine( int nArgs, char* arg[], MAIN_Param* param ) {
struct stat fileStat;
int adjustArgs = 0;
/* SPEC - handle one of --device/--platform */
if ( nArgs == 8 ) adjustArgs+= 2;
/* SPEC - handle both --device/--platform */
if ( nArgs == 10 ) adjustArgs+= 4;
if( nArgs < adjustArgs+5 || nArgs > adjustArgs+6 ) {
printf( "syntax: lbm <time steps> <result file> <0: nil, 1: cmp, 2: str> <0: ldc, 1: channel flow> [<obstacle file>]\n" );
exit( 1 );
}
/* NOTE(review): atoi and the enum casts below accept malformed numbers
   silently (atoi yields 0) and the enums are not range-checked -- confirm
   inputs are trusted benchmark harness arguments.  */
param->nTimeSteps = atoi( arg[adjustArgs+1] );
param->resultFilename = arg[adjustArgs+2];
param->action = (MAIN_Action) atoi( arg[adjustArgs+3] );
param->simType = (MAIN_SimType) atoi( arg[adjustArgs+4] );
if( nArgs == adjustArgs+6 ) {
param->obstacleFilename = arg[adjustArgs+5];
/* The obstacle file must exist and be exactly one byte per cell plus
   one line terminator per row of each slice.  */
if( stat( param->obstacleFilename, &fileStat ) != 0 ) {
printf( "MAIN_parseCommandLine: cannot stat obstacle file '%s'\n",
param->obstacleFilename );
exit( 1 );
}
if( fileStat.st_size != SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z ) {
printf( "MAIN_parseCommandLine:\n"
"\tsize of file '%s' is %i bytes\n"
"\texpected size is %i bytes\n",
param->obstacleFilename, (int) fileStat.st_size,
SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z );
exit( 1 );
}
}
else param->obstacleFilename = NULL;
/* COMPARE needs an existing reference result to diff against.  */
if( param->action == COMPARE &&
stat( param->resultFilename, &fileStat ) != 0 ) {
printf( "MAIN_parseCommandLine: cannot stat result file '%s'\n",
param->resultFilename );
exit( 1 );
}
}
/*############################################################################*/
/* Print a human-readable summary of the run configuration: grid size,
   time-step count, result file, action, simulation type, obstacle file.
   NOTE(review): the tables are indexed by param->action / param->simType,
   which come straight from atoi without range checks -- confirm callers.  */
void MAIN_printInfo( const MAIN_Param* param ) {
const char actionString[3][32] = {"nothing", "compare", "store"};
const char simTypeString[3][32] = {"lid-driven cavity", "channel flow"};
printf( "MAIN_printInfo:\n"
"\tgrid size : %i x %i x %i = %.2f * 10^6 Cells\n"
"\tnTimeSteps : %i\n"
"\tresult file : %s\n"
"\taction : %s\n"
"\tsimulation type: %s\n"
"\tobstacle file : %s\n\n",
SIZE_X, SIZE_Y, SIZE_Z, 1e-6*SIZE_X*SIZE_Y*SIZE_Z,
param->nTimeSteps, param->resultFilename,
actionString[param->action], simTypeString[param->simType],
(param->obstacleFilename == NULL) ? "<none>" :
param->obstacleFilename );
}
/*############################################################################*/
/* Allocate both lattice grids (also setting the global src/dst base
   pointers), initialize them, apply the optional obstacle map, mark the
   boundary cells for the chosen simulation type, and print the initial
   grid statistics.  */
void MAIN_initialize( const MAIN_Param* param) {
LBM_allocateGrid( (double**) &srcGrid, (double**) &src );
LBM_allocateGrid( (double**) &dstGrid, (double**) &dst );
LBM_initializeGrid( *srcGrid );
LBM_initializeGrid( *dstGrid );
if( param->obstacleFilename != NULL ) {
/* Both grids get the obstacles, since they swap roles every step.  */
LBM_loadObstacleFile( *srcGrid, param->obstacleFilename );
LBM_loadObstacleFile( *dstGrid, param->obstacleFilename );
}
if( param->simType == CHANNEL ) {
LBM_initializeSpecialCellsForChannel( *srcGrid );
LBM_initializeSpecialCellsForChannel( *dstGrid );
}
else {
LBM_initializeSpecialCellsForLDC( *srcGrid );
LBM_initializeSpecialCellsForLDC( *dstGrid );
}
LBM_showGridStatistics( *srcGrid );
}
/*############################################################################*/
/* Print final grid statistics, compare against or store the reference
   velocity field depending on the requested action, and free both grids.  */
void MAIN_finalize( const MAIN_Param* param ) {
LBM_showGridStatistics( *srcGrid );
if( param->action == COMPARE )
LBM_compareVelocityField( *srcGrid, param->resultFilename, TRUE );
if( param->action == STORE )
LBM_storeVelocityField( *srcGrid, param->resultFilename, TRUE );
LBM_freeGrid( (double**) &srcGrid );
LBM_freeGrid( (double**) &dstGrid );
}
|
GB_unaryop__lnot_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_uint64
// op(A') function: GB_tran__lnot_int32_uint64
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = !((int32_t) Ax [p] != 0), parallelized with a
// static OpenMP schedule.  The body below is the literal expansion of the
// generated GB_CAST_OP(p,p) macro: cast uint64_t -> int32_t first, then
// apply the logical-not, exactly as the generated semantics require.
GrB_Info GB_unop__lnot_int32_uint64
(
    int32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out; caller falls back to the generic implementation
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        int32_t x = (int32_t) aij ;
        Cx [p] = !(x != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply: C = lnot (cast (A')).  The loop body lives in the
// shared template GB_unaryop_transpose.c, instantiated through the GB_*
// macros defined at the top of this generated file (phase 2 of 2).
GrB_Info GB_tran__lnot_int32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// kernel compiled out (see GB_DISABLE); caller uses the generic case
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sort-1.c | /* Test and benchmark of a couple of parallel sorting algorithms.
Copyright (C) 2008-2017 Free Software Foundation, Inc.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include <limits.h>
#include <omp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int failures;
#define THRESHOLD 100
/* Report the elapsed wall time for the run labelled NAME (started at STIME)
   and check that ARRAY[0..COUNT-1] is in non-decreasing order.  Every
   inversion prints a message and sets the global `failures` flag; the scan
   continues to the end rather than stopping at the first inversion.  */
static void
verify (const char *name, double stime, int *array, int count)
{
int i;
double etime = omp_get_wtime ();
printf ("%s: %g\n", name, etime - stime);
for (i = 1; i < count; i++)
if (array[i] < array[i - 1])
{
printf ("%s: incorrectly sorted\n", name);
failures = 1;
}
}
/* Sort ARRAY[S..E] (inclusive bounds) in ascending order with a plain
   insertion sort; used for subranges below the quicksort THRESHOLD.  */
static void
insertsort (int *array, int s, int e)
{
  for (int i = s + 1; i <= e; i++)
    {
      int val = array[i];
      int j = i - 1;
      /* Shift larger elements right until val's slot is found.  */
      while (j >= s && array[j] > val)
        {
          array[j + 1] = array[j];
          j--;
        }
      array[j + 1] = val;
    }
}
/* A fixed-capacity LIFO stack of [lo, hi] index pairs, used to hold pending
   quicksort subranges.  Layout and STACK_SIZE are unchanged; callers index
   capacity checks against STACK_SIZE themselves.  */
struct int_pair
{
int lo;
int hi;
};
struct int_pair_stack
{
struct int_pair *top;
#define STACK_SIZE 4 * CHAR_BIT * sizeof (int)
struct int_pair arr[STACK_SIZE];
};
/* Reset STACK to empty.  */
static inline void
init_int_pair_stack (struct int_pair_stack *stack)
{
  stack->top = stack->arr;
}
/* Append the pair (LO, HI); the caller guarantees there is room.  */
static inline void
push_int_pair_stack (struct int_pair_stack *stack, int lo, int hi)
{
  struct int_pair *slot = stack->top++;
  slot->lo = lo;
  slot->hi = hi;
}
/* Remove the top pair into *LO/*HI; the caller guarantees non-emptiness.  */
static inline void
pop_int_pair_stack (struct int_pair_stack *stack, int *lo, int *hi)
{
  struct int_pair *slot = --stack->top;
  *lo = slot->lo;
  *hi = slot->hi;
}
/* Number of pairs currently stored.  */
static inline int
size_int_pair_stack (struct int_pair_stack *stack)
{
  return (int) (stack->top - stack->arr);
}
/* One iteration of a spin-wait: issue the architecture's CPU-relaxation
   hint where one exists, and in every case act as a compiler barrier so the
   spun-on variables are re-read from memory on each loop iteration.  */
static inline void
busy_wait (void)
{
#if defined __i386__ || defined __x86_64__
/* x86 PAUSE instruction.  */
__builtin_ia32_pause ();
#elif defined __ia64__
__asm volatile ("hint @pause" : : : "memory");
#elif defined __sparc__ && (defined __arch64__ || defined __sparc_v9__)
__asm volatile ("membar #LoadLoad" : : : "memory");
#else
/* Fallback: plain compiler barrier only.  */
__asm volatile ("" : : : "memory");
#endif
}
/* Exchange ARRAY[A] and ARRAY[B].  */
static inline void
swap (int *array, int a, int b)
{
  int tmp = array[b];
  array[b] = array[a];
  array[a] = tmp;
}
/* Median-of-three pivot selection.  Sorts array[lo], array[mid], array[hi]
   in place (so the values at lo and hi double as sentinels for partition's
   unguarded scans) and returns the median, which ends up at array[mid].  */
static inline int
choose_pivot (int *array, int lo, int hi)
{
int mid = (lo + hi) / 2;
if (array[mid] < array[lo])
swap (array, lo, mid);
if (array[hi] < array[mid])
{
swap (array, mid, hi);
/* The swap may have broken lo <= mid; restore it.  */
if (array[mid] < array[lo])
swap (array, lo, mid);
}
return array[mid];
}
/* Hoare-style partition of array[lo..hi] around the median-of-three pivot:
   after the call, elements before the returned index are <= pivot and
   elements from the returned index onward are >= pivot.  The unguarded
   ++left / --right scans rely on the sentinel values choose_pivot leaves at
   array[lo] and array[hi].  */
static inline int
partition (int *array, int lo, int hi)
{
int pivot = choose_pivot (array, lo, hi);
int left = lo;
int right = hi;
for (;;)
{
/* Scan inward past elements already on the correct side.  */
while (array[++left] < pivot);
while (array[--right] > pivot);
if (left >= right)
break;
swap (array, left, right);
}
return left;
}
/* Parallel quicksort with explicit work sharing: a lock-protected global
   stack feeds idle threads, and each thread also keeps a private local
   stack of pending subranges.  `busy` counts threads currently holding
   work; when it reaches zero with the global stack empty, all threads
   terminate.  */
static void
sort1 (int *array, int count)
{
omp_lock_t lock;
struct int_pair_stack global_stack;
int busy = 1;
int num_threads;
omp_init_lock (&lock);
init_int_pair_stack (&global_stack);
#pragma omp parallel firstprivate (array, count)
{
int lo = 0, hi = 0, mid, next_lo, next_hi;
bool idle = true;
struct int_pair_stack local_stack;
init_int_pair_stack (&local_stack);
if (omp_get_thread_num () == 0)
{
/* Thread 0 starts owning the whole range; all others start idle.  */
num_threads = omp_get_num_threads ();
hi = count - 1;
idle = false;
}
for (;;)
{
if (hi - lo < THRESHOLD)
{
/* Small ranges are finished directly with insertion sort.  */
insertsort (array, lo, hi);
lo = hi;
}
if (lo >= hi)
{
/* Current range exhausted: fetch more work, preferring the
   private stack, falling back to the shared one.  */
if (size_int_pair_stack (&local_stack) == 0)
{
again:
omp_set_lock (&lock);
if (size_int_pair_stack (&global_stack) == 0)
{
if (!idle)
busy--;
if (busy == 0)
{
/* No thread holds work and the shared stack is
   empty: the sort is complete.  */
omp_unset_lock (&lock);
break;
}
omp_unset_lock (&lock);
idle = true;
/* Spin until work appears or everyone finishes.
   NOTE(review): busy and the stack size are read here
   without the lock or atomics; the re-check under the
   lock after `goto again` makes the decision safe.  */
while (size_int_pair_stack (&global_stack) == 0
&& busy)
busy_wait ();
goto again;
}
if (idle)
busy++;
pop_int_pair_stack (&global_stack, &lo, &hi);
omp_unset_lock (&lock);
idle = false;
}
else
pop_int_pair_stack (&local_stack, &lo, &hi);
}
mid = partition (array, lo, hi);
/* Keep the smaller half for the next iteration, defer the larger.  */
if (mid - lo < hi - mid)
{
next_lo = mid;
next_hi = hi;
hi = mid - 1;
}
else
{
next_lo = lo;
next_hi = mid - 1;
lo = mid;
}
if (next_hi - next_lo < THRESHOLD)
insertsort (array, next_lo, next_hi);
else
{
/* Publish the deferred half to the shared stack while it is
   under-filled so idle threads can steal it; otherwise keep it
   local and avoid the lock.  */
if (size_int_pair_stack (&global_stack) < num_threads - 1)
{
int size;
omp_set_lock (&lock);
size = size_int_pair_stack (&global_stack);
if (size < num_threads - 1 && size < STACK_SIZE)
push_int_pair_stack (&global_stack, next_lo, next_hi);
else
push_int_pair_stack (&local_stack, next_lo, next_hi);
omp_unset_lock (&lock);
}
else
push_int_pair_stack (&local_stack, next_lo, next_hi);
}
}
}
omp_destroy_lock (&lock);
}
/* Recursive quicksort using nested parallel regions: while fewer than
   NUM_THREADS threads are busy, each partition spawns a two-thread team for
   the halves; otherwise it recurses serially.  *BUSY counts active threads
   across the whole sort.  */
static void
sort2_1 (int *array, int lo, int hi, int num_threads, int *busy)
{
int mid;
if (hi - lo < THRESHOLD)
{
insertsort (array, lo, hi);
return;
}
mid = partition (array, lo, hi);
/* NOTE(review): *busy is read here without synchronization; it only
   serves as a heuristic bound on thread creation.  */
if (*busy >= num_threads)
{
sort2_1 (array, lo, mid - 1, num_threads, busy);
sort2_1 (array, mid, hi, num_threads, busy);
return;
}
#pragma omp atomic
*busy += 1;
#pragma omp parallel num_threads (2) \
firstprivate (array, lo, hi, mid, num_threads, busy)
{
if (omp_get_thread_num () == 0)
sort2_1 (array, lo, mid - 1, num_threads, busy);
else
{
/* The second thread releases its busy slot when its half is done.  */
sort2_1 (array, mid, hi, num_threads, busy);
#pragma omp atomic
*busy -= 1;
}
}
}
/* Entry point for the nested-parallelism variant: determine the team size,
   then start the recursion with one busy slot already held by the caller.  */
static void
sort2 (int *array, int count)
{
int num_threads;
int busy = 1;
#pragma omp parallel
#pragma omp single nowait
num_threads = omp_get_num_threads ();
sort2_1 (array, 0, count - 1, num_threads, &busy);
}
#if _OPENMP >= 200805
/* Task-based quicksort (OpenMP 3.0 tasks): the left half is deferred as an
   explicit task while the current thread continues with the right half.  */
static void
sort3_1 (int *array, int lo, int hi)
{
int mid;
if (hi - lo < THRESHOLD)
{
insertsort (array, lo, hi);
return;
}
mid = partition (array, lo, hi);
#pragma omp task
sort3_1 (array, lo, mid - 1);
sort3_1 (array, mid, hi);
}
/* Entry point for the task-based variant: one thread seeds the task tree;
   the implicit barrier at the end of the parallel region waits for all
   generated tasks to complete.  */
static void
sort3 (int *array, int count)
{
#pragma omp parallel
#pragma omp single
sort3_1 (array, 0, count - 1);
}
#endif
int
main (int argc, char **argv)
{
int i, count = 1000000;
double stime;
int *unsorted, *sorted, num_threads;
if (argc >= 2)
count = strtoul (argv[1], NULL, 0);
unsorted = malloc (count * sizeof (int));
sorted = malloc (count * sizeof (int));
if (unsorted == NULL || sorted == NULL)
{
puts ("allocation failure");
exit (1);
}
srand (0xdeadbeef);
for (i = 0; i < count; i++)
unsorted[i] = rand ();
omp_set_nested (1);
omp_set_dynamic (0);
#pragma omp parallel
#pragma omp single nowait
num_threads = omp_get_num_threads ();
printf ("Threads: %d\n", num_threads);
memcpy (sorted, unsorted, count * sizeof (int));
stime = omp_get_wtime ();
sort1 (sorted, count);
verify ("sort1", stime, sorted, count);
memcpy (sorted, unsorted, count * sizeof (int));
stime = omp_get_wtime ();
sort2 (sorted, count);
verify ("sort2", stime, sorted, count);
#if _OPENMP >= 200805
memcpy (sorted, unsorted, count * sizeof (int));
stime = omp_get_wtime ();
sort3 (sorted, count);
verify ("sort3", stime, sorted, count);
#endif
return 0;
}
|
for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for'}}
#pragma omp for foo
// Plain '#pragma omp for': accepted on a for statement, diagnosed when the
// following statement is not a for loop.
void test_no_clause() {
int i;
#pragma omp for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp for' must be a for loop}}
#pragma omp for
++i;
}
// Branches may not escape an OpenMP loop region: goto/return out of the
// region are diagnosed, while jumps wholly inside the region are fine.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Unknown tokens after the directive are ignored with a warning; clauses
// belonging to other directives are rejected (each reported exactly once).
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for foo bar
for (i = 0; i < 16; ++i)
;
// At one time, this failed an assert.
// expected-error@+1 {{unexpected OpenMP clause 'num_teams' in directive '#pragma omp for'}}
#pragma omp for num_teams(3)
for (i = 0; i < 16; ++i)
;
// At one time, this error was reported twice.
// expected-error@+1 {{unexpected OpenMP clause 'uniform' in directive '#pragma omp for'}}
#pragma omp for uniform
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{unexpected OpenMP clause 'if' in directive '#pragma omp for'}}
#pragma omp for if(0)
for (i = 0; i < 16; ++i)
;
}
// Stray punctuation (';', ',') after the directive or after a clause is
// ignored with an "extra tokens" warning rather than an error.
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp parallel
#pragma omp for linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for' are ignored}}
#pragma omp for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// 'collapse' clause checks: malformed parenthesization, missing/extra
// arguments, non-constant and non-positive expressions, and that the loop
// nest is at least as deep as the collapse count.
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
#pragma omp for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for collapse(2)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// 'private' clause parsing: malformed lists, non-variable arguments, and
// well-formed single- and multi-variable uses.
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// 'lastprivate' clause parsing: same malformed-list and non-variable cases
// as 'private', plus well-formed uses.
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// 'firstprivate' clause parsing, plus valid combinations of lastprivate
// and firstprivate on the same variables.
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Loop iteration variable type checks: floating-point induction variables
// are rejected; __int128 is accepted but narrowed with a warning.
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp for
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
idaFoodWeb_bnd_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* Based on idaFoodWeb_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDA: Food web problem.
*
* This example program (OpenMP version) uses the SUNBAND linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDA using the SUNBAND linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idaFoodWeb_bnd_omp
* To specify the number of threads at the command line, use
* % ./idaFoodWeb_bnd_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ida/ida.h>
#include <sunmatrix/sunmatrix_band.h>
#include <sunlinsol/sunlinsol_band.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_direct.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
/* Type: UserData. Contains problem constants, etc. */
typedef struct {
sunindextype Neq, ns, np, mx, my;
realtype dx, dy, **acoef;
realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
N_Vector rates;
int nthreads;
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
/*
 * Driver: builds the user data block, the solution vectors, the IDA
 * integrator with a band linear solver, corrects the initial condition
 * with IDACalcIC, integrates to the output times and prints statistics.
 * Fix over original: the results of malloc, N_VNew_OpenMP and
 * SUNDlsMat_newDenseMat for the user data block were dereferenced
 * without a NULL check (InitUserData writes through acoef); they are
 * now verified with check_retval like every other allocation here.
 */
int main(int argc, char *argv[])
{
void *ida_mem;
SUNMatrix A;
SUNLinearSolver LS;
UserData webdata;
N_Vector cc, cp, id;
int iout, retval;
sunindextype mu, ml;
realtype rtol, atol, t0, tout, tret;
int num_threads;
SUNContext ctx;
ida_mem = NULL;
A = NULL;
LS = NULL;
webdata = NULL;
cc = cp = id = NULL;
/* Set the number of threads to use */
num_threads = 1; /* default value */
#ifdef _OPENMP
num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS environment variable */
#endif
if (argc > 1) /* overwrite with command line value, if supplied */
num_threads = (int) strtol(argv[1], NULL, 0);
/* Create the SUNDIALS context object for this simulation */
retval = SUNContext_Create(NULL, &ctx);
if (check_retval(&retval, "SUNContext_Create", 1)) return 1;
/* Allocate and initialize user data block webdata.
   Each allocation is checked before it is written through below. */
webdata = (UserData) malloc(sizeof *webdata);
if(check_retval((void *)webdata, "malloc", 2)) return(1);
webdata->rates = N_VNew_OpenMP(NEQ, num_threads, ctx);
if(check_retval((void *)webdata->rates, "N_VNew_OpenMP", 0)) return(1);
webdata->acoef = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
if(check_retval((void *)webdata->acoef, "SUNDlsMat_newDenseMat", 2)) return(1);
webdata->nthreads = num_threads;
InitUserData(webdata);
/* Allocate N-vectors and initialize cc, cp, and id. */
cc = N_VNew_OpenMP(NEQ, num_threads, ctx);
if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
cp = N_VNew_OpenMP(NEQ, num_threads, ctx);
if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
id = N_VNew_OpenMP(NEQ, num_threads, ctx);
if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);
SetInitialProfiles(cc, cp, id, webdata);
/* Set remaining inputs to IDAMalloc. */
t0 = ZERO;
rtol = RTOL;
atol = ATOL;
/* Call IDACreate and IDAMalloc to initialize IDA. */
ida_mem = IDACreate(ctx);
if(check_retval((void *) ida_mem, "IDACreate", 0)) return(1);
retval = IDASetUserData(ida_mem, webdata);
if(check_retval(&retval, "IDASetUserData", 1)) return(1);
retval = IDASetId(ida_mem, id);
if(check_retval(&retval, "IDASetId", 1)) return(1);
retval = IDAInit(ida_mem, resweb, t0, cc, cp);
if(check_retval(&retval, "IDAInit", 1)) return(1);
retval = IDASStolerances(ida_mem, rtol, atol);
if(check_retval(&retval, "IDASStolerances", 1)) return(1);
/* Setup band matrix and linear solver, and attach to IDA. */
mu = ml = NSMX;
A = SUNBandMatrix(NEQ, mu, ml, ctx);
if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);
LS = SUNLinSol_Band(cc, A, ctx);
if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);
retval = IDASetLinearSolver(ida_mem, LS, A);
if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);
/* Call IDACalcIC (with default options) to correct the initial values. */
tout = RCONST(0.001);
retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
if(check_retval(&retval, "IDACalcIC", 1)) return(1);
/* Print heading, basic parameters, and initial values. */
PrintHeader(mu, ml, rtol, atol);
PrintOutput(ida_mem, cc, ZERO);
/* Loop over iout, call IDASolve (normal mode), print selected output. */
for (iout = 1; iout <= NOUT; iout++) {
retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
if(check_retval(&retval, "IDASolve", 1)) return(retval);
PrintOutput(ida_mem, cc, tret);
if (iout < 3) tout *= TMULT; else tout += TADD;
}
/* Print final statistics and free memory. */
PrintFinalStats(ida_mem);
printf("num_threads = %i\n\n", num_threads);
/* Free memory */
IDAFree(&ida_mem);
SUNLinSolFree(LS);
SUNMatDestroy(A);
N_VDestroy_OpenMP(cc);
N_VDestroy_OpenMP(cp);
N_VDestroy_OpenMP(id);
SUNDlsMat_destroyMat(webdata->acoef);
N_VDestroy_OpenMP(webdata->rates);
free(webdata);
SUNContext_Free(&ctx);
return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the
* equations, then loads the residual vector accordingly,
* using cp in the case of prey species.
*/
/*
 * DAE residual F(t, cc, cp) for IDA.  Fweb first overwrites res with the
 * right-hand sides; this routine then converts each entry in place:
 * prey (differential) components become cp - rhs, predator (algebraic)
 * components become -rhs.  Returns 0 (success) always.
 */
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
N_Vector res, void *user_data)
{
sunindextype jx, jy, is, yloc, loc, np;
realtype *resv, *cpv;
UserData webdata;
jx = jy = is = 0;
webdata = (UserData)user_data;
cpv = NV_DATA_OMP(cp);
resv = NV_DATA_OMP(res);
np = webdata->np;
/* Call Fweb to set res to vector of right-hand sides. */
Fweb(tt, cc, res, webdata);
/* Loop over all grid points, setting residual values appropriately
for differential or algebraic components. */
/* Each (jy, jx, is) entry is independent, so the grid sweep is safe to
parallelize; loop indices are private per thread. */
#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads)
for (jy = 0; jy < MY; jy++) {
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
loc = yloc + NUM_SPECIES * jx;
for (is = 0; is < NUM_SPECIES; is++) {
if (is < np)
resv[loc+is] = cpv[loc+is] - resv[loc+is];
else
resv[loc+is] = -resv[loc+is];
}
}
}
return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData: Load problem constants in webdata (of type UserData).
*/
/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 * Fills the interaction matrix acoef quadrant by quadrant using direct
 * 2-D indexing (prey-prey ZERO, prey-predator -GG, predator-prey EE,
 * predator-predator ZERO, then -AA on both diagonals), and sets the
 * b coefficients and the x/y diffusion coefficients per species.
 */
static void InitUserData(UserData webdata)
{
sunindextype i, j, np;
realtype dx2, dy2;
webdata->mx = MX;
webdata->my = MY;
webdata->ns = NUM_SPECIES;
webdata->np = NPREY;
webdata->dx = AX/(MX-1);
webdata->dy = AY/(MY-1);
webdata->Neq= NEQ;
np = webdata->np;
dx2 = (webdata->dx)*(webdata->dx);
dy2 = (webdata->dy)*(webdata->dy);
for (i = 0; i < np; i++) {
/* Fill row i (prey) and row i+np (predator) across all four quadrants. */
for (j = 0; j < np; j++) {
acoef[i][j] = ZERO;        /* prey-prey */
acoef[i][np+j] = -GG;      /* prey-predator */
acoef[i+np][j] = EE;       /* predator-prey */
acoef[i+np][np+j] = ZERO;  /* predator-predator */
}
/* Self-interaction: overwrite the diagonal entries with -AA. */
acoef[i][i] = -AA;
acoef[i+np][i+np] = -AA;
/* Birth/death coefficients and diffusion coefficients. */
bcoef[i] = BB; bcoef[i+np] = -BB;
cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2;
coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2;
}
}
/*
* SetInitialProfiles: Set initial conditions in cc, cp, and id.
* A polynomial profile is used for the prey cc values, and a constant
* (1.0e5) is loaded as the initial guess for the predator cc values.
* The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and
* the predator cp values are set to zero.
*/
/*
 * Load initial values: prey concentrations get a squared polynomial
 * bump profile, predators get the flat guess 1e5 (later corrected by
 * IDACalcIC); id marks prey components as differential (ONE) and
 * predator components as algebraic (ZERO).  cp is then filled via Fweb
 * for the prey and explicitly zeroed for the predators — the order of
 * the two passes matters, since Fweb writes all of cp.
 */
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata)
{
sunindextype loc, yloc, is, jx, jy, np;
realtype xx, yy, xyfactor;
realtype *ccv, *cpv, *idv;
ccv = NV_DATA_OMP(cc);
cpv = NV_DATA_OMP(cp);
idv = NV_DATA_OMP(id);
np = webdata->np;
/* Loop over grid, load cc values and id values. */
for (jy = 0; jy < MY; jy++) {
yy = jy * webdata->dy;
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
xx = jx * webdata->dx;
/* (16 x (1-x) y (1-y))^2: zero on the boundary, peaked mid-domain. */
xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
xyfactor *= xyfactor;
loc = yloc + NUM_SPECIES*jx;
for (is = 0; is < NUM_SPECIES; is++) {
if (is < np) {
ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
idv[loc+is] = ONE;
}
else {
ccv[loc+is] = RCONST(1.0e5);
idv[loc+is] = ZERO;
}
}
}
}
/* Set c' for the prey by calling the function Fweb. */
Fweb(ZERO, cc, cp, webdata);
/* Set c' for predators to 0. */
for (jy = 0; jy < MY; jy++) {
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
loc = yloc + NUM_SPECIES * jx;
for (is = np; is < NUM_SPECIES; is++) {
cpv[loc+is] = ZERO;
}
}
}
}
/*
* Print first lines of output (problem description)
*/
/*
 * Print the problem description, mesh/tolerance/solver parameters and
 * the output table header.  The format specifiers for the tolerances
 * follow the configured realtype precision (the double and default
 * branches are intentionally identical).
 */
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol)
{
printf("\nidaFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDA \n\n");
printf("Number of species ns: %d", NUM_SPECIES);
printf(" Mesh dimensions: %d x %d", MX, MY);
printf(" System size: %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
/* Cast to long int so the %ld specifier matches regardless of the
sunindextype configuration. */
printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n",
(long int) mu, (long int) ml);
printf("CalcIC called to correct initial predator concentrations.\n\n");
printf("-----------------------------------------------------------\n");
printf(" t bottom-left top-right");
printf(" | nst k h\n");
printf("-----------------------------------------------------------\n\n");
}
/*
* PrintOutput: Print output values at output time t = tt.
* Selected run statistics are printed. Then values of the concentrations
* are printed for the bottom left and top right grid points only.
*/
/*
 * Print one output row: time, concentrations at the bottom-left and
 * top-right grid points, and selected integrator statistics (step
 * count, last order, last step size).  Extra species beyond the first
 * are printed on continuation lines.
 */
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
int i, kused, retval;
long int nst;
realtype *c_bl, *c_tr, hused;
retval = IDAGetLastOrder(ida_mem, &kused);
check_retval(&retval, "IDAGetLastOrder", 1);
retval = IDAGetNumSteps(ida_mem, &nst);
check_retval(&retval, "IDAGetNumSteps", 1);
retval = IDAGetLastStep(ida_mem, &hused);
check_retval(&retval, "IDAGetLastStep", 1);
/* Pointers to the species blocks at the two corner grid points. */
c_bl = IJ_Vptr(c,0,0);
c_tr = IJ_Vptr(c,MX-1,MY-1);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif
printf("\n");
}
/*
* PrintFinalStats: Print final run data contained in iopt.
*/
/*
 * PrintFinalStats: Print final run data contained in iopt.
 * Queries the IDA counters (steps, nonlinear iterations, residual
 * evaluations, error-test failures, convergence failures, Jacobian
 * evaluations, linear-solver residual evaluations) and prints a
 * summary table.  Residual evaluations are reported as the sum of
 * direct and linear-solver-internal calls.
 */
static void PrintFinalStats(void *ida_mem)
{
long int n_steps, n_res_evals, n_res_evals_ls, n_nonlin_iters;
long int n_jac_evals, n_err_test_fails, n_conv_fails;
int retval;
/* Gather all counters first (same query order as callers expect). */
retval = IDAGetNumSteps(ida_mem, &n_steps);
check_retval(&retval, "IDAGetNumSteps", 1);
retval = IDAGetNumNonlinSolvIters(ida_mem, &n_nonlin_iters);
check_retval(&retval, "IDAGetNumNonlinSolvIters", 1);
retval = IDAGetNumResEvals(ida_mem, &n_res_evals);
check_retval(&retval, "IDAGetNumResEvals", 1);
retval = IDAGetNumErrTestFails(ida_mem, &n_err_test_fails);
check_retval(&retval, "IDAGetNumErrTestFails", 1);
retval = IDAGetNumNonlinSolvConvFails(ida_mem, &n_conv_fails);
check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1);
retval = IDAGetNumJacEvals(ida_mem, &n_jac_evals);
check_retval(&retval, "IDAGetNumJacEvals", 1);
retval = IDAGetNumLinResEvals(ida_mem, &n_res_evals_ls);
check_retval(&retval, "IDAGetNumLinResEvals", 1);
/* Then report them. */
printf("-----------------------------------------------------------\n");
printf("Final run statistics: \n\n");
printf("Number of steps = %ld\n", n_steps);
printf("Number of residual evaluations = %ld\n", n_res_evals+n_res_evals_ls);
printf("Number of Jacobian evaluations = %ld\n", n_jac_evals);
printf("Number of nonlinear iterations = %ld\n", n_nonlin_iters);
printf("Number of error test failures = %ld\n", n_err_test_fails);
printf("Number of nonlinear conv. failures = %ld\n", n_conv_fails);
}
/*
* Fweb: Rate function for the food-web problem.
* This routine computes the right-hand sides of the system equations,
* consisting of the diffusion term and interaction term.
* The interaction term is computed by the function WebRates.
*/
/*
 * Compute the full right-hand side vector crate = diffusion + reaction
 * at all grid points.  At domain edges the neighbor offsets (idyu/idyl/
 * idxu/idxl) flip sign so the interior neighbor is used on both sides,
 * implementing the homogeneous Neumann (zero normal derivative)
 * boundary condition described in the file header.
 */
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
UserData webdata)
{
sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;
/* Loop over grid points, evaluate interaction vector (length ns),
form diffusion difference terms, and load crate. */
jx = jy = is = 0;
for (jy = 0; jy < MY; jy++) {
yy = (webdata->dy) * jy ;
/* Offsets to the y-neighbors; sign flips at the boundary rows. */
idyu = (jy!=MY-1) ? NSMX : -NSMX;
idyl = (jy!= 0 ) ? NSMX : -NSMX;
for (jx = 0; jx < MX; jx++) {
xx = (webdata->dx) * jx;
/* Offsets to the x-neighbors; sign flips at the boundary columns. */
idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
cxy = IJ_Vptr(cc,jx,jy);
ratesxy = IJ_Vptr(webdata->rates,jx,jy);
cratexy = IJ_Vptr(crate,jx,jy);
/* Get interaction vector at this grid point. */
WebRates(xx, yy, cxy, ratesxy, webdata);
/* Loop over species, do differencing, load crate segment. */
/* Species are independent here, so the inner loop is parallelized. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
for (is = 0; is < NUM_SPECIES; is++) {
/* Differencing in y. */
dcyli = *(cxy+is) - *(cxy - idyl + is) ;
dcyui = *(cxy + idyu + is) - *(cxy+is);
/* Differencing in x. */
dcxli = *(cxy+is) - *(cxy - idxl + is);
dcxui = *(cxy + idxu +is) - *(cxy+is);
/* Compute the crate values at (xx,yy). */
cratexy[is] = coy[is] * (dcyui - dcyli) +
cox[is] * (dcxui - dcxli) + ratesxy[is];
} /* End is loop */
} /* End of jx loop */
} /* End of jy loop */
}
/*
* WebRates: Evaluate reaction rates at a given spatial point.
* At a given (x,y), evaluate the array of ns reaction terms R.
*/
/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * For each species i: R_i = c_i * (b_i * fac + sum_j a(i,j) * c_j),
 * where fac carries the spatial modulation of the b coefficients.
 * cxy and ratesxy point into distinct vectors (cc and webdata->rates),
 * so each entry can be computed in a single pass.
 */
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata)
{
int idx;
const realtype fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);
for (idx = 0; idx < NUM_SPECIES; idx++) {
const realtype interaction = dotprod(NUM_SPECIES, cxy, acoef[idx]);
ratesxy[idx] = cxy[idx]*( bcoef[idx]*fac + interaction );
}
}
/*
* dotprod: dot product routine for realtype arrays, for use by WebRates.
*/
/*
 * dotprod: dot product routine for realtype arrays, for use by WebRates.
 * Returns sum_{i=0}^{size-1} x1[i] * x2[i].
 */
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
sunindextype i;
realtype sum = ZERO;
for (i = 0; i < size; i++) {
sum += x1[i] * x2[i];
}
return(sum);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
/*
 * Check function return value...
 *   opt == 0: SUNDIALS allocator — error if returnvalue is NULL.
 *   opt == 1: SUNDIALS status code — error if *(int *)returnvalue < 0.
 *   opt == 2: plain allocator (e.g. malloc) — error if returnvalue is NULL.
 * Prints a diagnostic to stderr and returns 1 on error, 0 otherwise.
 * (Signature kept as char *funcname to match the file-level prototype.)
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
switch (opt) {
case 0:
/* SUNDIALS allocation: NULL means no memory was allocated. */
if (returnvalue == NULL) {
fprintf(stderr,
"\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1);
}
break;
case 1: {
/* SUNDIALS status: negative values signal failure. */
int *retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr,
"\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
funcname, *retval);
return(1);
}
break;
}
case 2:
/* Generic allocation: NULL means no memory was allocated. */
if (returnvalue == NULL) {
fprintf(stderr,
"\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1);
}
break;
default:
break;
}
return(0);
}
|
levelset_convection_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Ruben Zorrilla
//
#if !defined(KRATOS_LEVELSET_CONVECTION_PROCESS_INCLUDED )
#define KRATOS_LEVELSET_CONVECTION_PROCESS_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/convection_diffusion_settings.h"
#include "includes/define.h"
#include "includes/kratos_flags.h"
#include "elements/levelset_convection_element_simplex.h"
#include "geometries/geometry_data.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "utilities/variable_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/**takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and convects a level set distance
* on the top of it
*/
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
class LevelSetConvectionProcess
: public Process
{
public:
KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1);
KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS);
///@name Type Definitions
///@{
typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
typedef SolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType;
///@}
///@name Pointer Definitions
///@{
/// Pointer definition of LevelSetConvectionProcess
KRATOS_CLASS_POINTER_DEFINITION(LevelSetConvectionProcess);
///@}
///@name Life Cycle
///@{
/**
 * @brief Construct the convection process with its own linear strategy.
 * Validates the base model part (non-empty, simplicial geometry matching
 * TDim, required nodal variables), prepares the DYNAMIC_TAU and
 * CONVECTION_DIFFUSION_SETTINGS process-info entries if absent, builds
 * the auxiliary convection model part, and assembles a residual-based
 * linear strategy (static scheme + block builder-and-solver) over it.
 * @param rLevelSetVar nodal scalar variable to be convected
 * @param rBaseModelPart model part providing mesh, VELOCITY and the level set
 * @param plinear_solver linear solver used by the internal strategy
 * @param max_cfl target maximum CFL used to pick the substep count
 * @param cross_wind_stabilization_factor stored in the process info
 * @param max_substeps upper bound on substeps (0 presumably means no cap — verify in EvaluateNumberOfSubsteps)
 */
LevelSetConvectionProcess(
Variable<double>& rLevelSetVar,
ModelPart& rBaseModelPart,
typename TLinearSolver::Pointer plinear_solver,
const double max_cfl = 1.0,
const double cross_wind_stabilization_factor = 0.7,
const unsigned int max_substeps = 0)
: mrBaseModelPart(rBaseModelPart),
mrModel(rBaseModelPart.GetModel()),
mrLevelSetVar(rLevelSetVar),
mMaxAllowedCFL(max_cfl),
mMaxSubsteps(max_substeps),
mAuxModelPartName(rBaseModelPart.Name() + "_DistanceConvectionPart")
{
KRATOS_TRY
// Check that there is at least one element and node in the model
const auto n_nodes = rBaseModelPart.NumberOfNodes();
const auto n_elems = rBaseModelPart.NumberOfElements();
KRATOS_ERROR_IF(n_nodes == 0) << "The model has no nodes." << std::endl;
KRATOS_ERROR_IF(n_elems == 0) << "The model has no elements." << std::endl;
// The level set variable and VELOCITY must exist in the nodal database.
VariableUtils().CheckVariableExists< Variable< double > >(rLevelSetVar, rBaseModelPart.Nodes());
VariableUtils().CheckVariableExists< Variable< array_1d < double, 3 > > >(VELOCITY, rBaseModelPart.Nodes());
// Only the first element's geometry family is inspected; the mesh is
// assumed to be homogeneous (all simplices of the same family).
if(TDim == 2){
KRATOS_ERROR_IF(rBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily() != GeometryData::Kratos_Triangle) <<
"In 2D the element type is expected to be a triangle" << std::endl;
} else if(TDim == 3) {
KRATOS_ERROR_IF(rBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily() != GeometryData::Kratos_Tetrahedra) <<
"In 3D the element type is expected to be a tetrahedra" << std::endl;
}
// Allocate if needed the variable DYNAMIC_TAU of the process info, and if it does not exist, set it to zero
if( rBaseModelPart.GetProcessInfo().Has(DYNAMIC_TAU) == false){
rBaseModelPart.GetProcessInfo().SetValue(DYNAMIC_TAU,0.0);
}
// Allocate if needed the variable CONVECTION_DIFFUSION_SETTINGS of the process info, and create it if it does not exist
if( rBaseModelPart.GetProcessInfo().Has(CONVECTION_DIFFUSION_SETTINGS) == false){
ConvectionDiffusionSettings::Pointer p_conv_diff_settings = Kratos::make_unique<ConvectionDiffusionSettings>();
rBaseModelPart.GetProcessInfo().SetValue(CONVECTION_DIFFUSION_SETTINGS, p_conv_diff_settings);
p_conv_diff_settings->SetUnknownVariable(rLevelSetVar);
p_conv_diff_settings->SetConvectionVariable(VELOCITY);
}
// Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
mDistancePartIsInitialized = false;
ReGenerateConvectionModelPart(rBaseModelPart);
// Generate a linear strategy
typename SchemeType::Pointer pscheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >();
typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverTypePointer;
bool CalculateReactions = false;
bool ReformDofAtEachIteration = false;
bool CalculateNormDxFlag = false;
BuilderSolverTypePointer pBuilderSolver = Kratos::make_shared< ResidualBasedBlockBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver > >(plinear_solver);
mpSolvingStrategy = Kratos::make_unique< ResidualBasedLinearStrategy<TSparseSpace,TDenseSpace,TLinearSolver > >(
*mpDistanceModelPart,
pscheme,
pBuilderSolver,
CalculateReactions,
ReformDofAtEachIteration,
CalculateNormDxFlag);
mpSolvingStrategy->SetEchoLevel(0);
rBaseModelPart.GetProcessInfo().SetValue(CROSS_WIND_STABILIZATION_FACTOR, cross_wind_stabilization_factor);
//TODO: check flag DO_EXPENSIVE_CHECKS
mpSolvingStrategy->Check();
KRATOS_CATCH("")
}
/// Destructor.
// Removes the auxiliary convection model part from the model on destruction.
~LevelSetConvectionProcess() override
{
mrModel.DeleteModelPart(mAuxModelPartName);
}
///@}
///@name Operators
///@{
// Function-call operator: convenience alias for Execute().
void operator()(){
Execute();
}
///@}
///@name Operations
///@{
/**
 * @brief Convect the level set variable over one time step.
 * Splits DELTA_TIME into n_substep substeps (chosen from the CFL target),
 * linearly interpolating the nodal VELOCITY between its old and current
 * values for each substep, and solves the linear convection problem per
 * substep.  The process info (DELTA_TIME, unknown variable) and the nodal
 * VELOCITY / old level set values are saved up front and restored at the
 * end, so the call leaves the database as it found it except for the
 * convected level set values.
 * NOTE(review): assumes mVelocity/mVelocityOld/mOldDistance were sized to
 * the node count in ReGenerateConvectionModelPart — verify there.
 */
void Execute() override
{
KRATOS_TRY;
if(mDistancePartIsInitialized == false){
ReGenerateConvectionModelPart(mrBaseModelPart);
}
// Evaluate steps needed to achieve target max_cfl
const auto n_substep = EvaluateNumberOfSubsteps();
// Save the variables to be employed so that they can be restored after the solution
ProcessInfo& rCurrentProcessInfo = mpDistanceModelPart->GetProcessInfo();
const auto & r_previous_var = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS)->GetUnknownVariable();
const double previous_delta_time = rCurrentProcessInfo.GetValue(DELTA_TIME);
// Save current level set value and current and previous step velocity values
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(mpDistanceModelPart->NumberOfNodes()); ++i_node){
const auto it_node = mpDistanceModelPart->NodesBegin() + i_node;
mVelocity[i_node] = it_node->FastGetSolutionStepValue(VELOCITY);
mVelocityOld[i_node] = it_node->FastGetSolutionStepValue(VELOCITY,1);
mOldDistance[i_node] = it_node->FastGetSolutionStepValue(mrLevelSetVar,1);
}
// Substep time increment and unknown variable for the convection solve.
const double dt = previous_delta_time / static_cast<double>(n_substep);
rCurrentProcessInfo.SetValue(DELTA_TIME, dt);
rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS)->SetUnknownVariable(mrLevelSetVar);
const int rank = mrBaseModelPart.GetCommunicator().MyPID();
for(unsigned int step = 1; step <= n_substep; ++step){
KRATOS_INFO_IF("LevelSetConvectionProcess", mpSolvingStrategy->GetEchoLevel() > 0 && rank == 0) <<
"Doing step "<< step << " of " << n_substep << std::endl;
// Compute shape functions of old and new step
const double Nold = 1.0 - static_cast<double>(step) / static_cast<double>(n_substep);
const double Nnew = 1.0 - Nold;
const double Nold_before = 1.0 - static_cast<double>(step-1) / static_cast<double>(n_substep);
const double Nnew_before = 1.0 - Nold_before;
// Emulate clone time step by copying the new distance onto the old one
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(mpDistanceModelPart->NumberOfNodes()); ++i_node){
auto it_node = mpDistanceModelPart->NodesBegin() + i_node;
const array_1d<double,3>& v = mVelocity[i_node];
const array_1d<double,3>& v_old = mVelocityOld[i_node];
// Interpolated velocities for the current and previous substep instants.
it_node->FastGetSolutionStepValue(VELOCITY) = Nold * v_old + Nnew * v;
it_node->FastGetSolutionStepValue(VELOCITY, 1) = Nold_before * v_old + Nnew_before * v;
it_node->FastGetSolutionStepValue(mrLevelSetVar, 1) = it_node->FastGetSolutionStepValue(mrLevelSetVar);
}
mpSolvingStrategy->Solve();
}
// Reset the processinfo to the original settings
rCurrentProcessInfo.SetValue(DELTA_TIME, previous_delta_time);
rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS)->SetUnknownVariable(r_previous_var);
// Reset the velocities and levelset values to the one saved before the solution process
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(mpDistanceModelPart->NumberOfNodes()); ++i_node){
auto it_node = mpDistanceModelPart->NodesBegin() + i_node;
it_node->FastGetSolutionStepValue(VELOCITY) = mVelocity[i_node];
it_node->FastGetSolutionStepValue(VELOCITY,1) = mVelocityOld[i_node];
it_node->FastGetSolutionStepValue(mrLevelSetVar,1) = mOldDistance[i_node];
}
KRATOS_CATCH("")
}
// Release all data held by the process: empties the auxiliary model part
// and the backup containers, clears the strategy, and flags the distance
// part as uninitialized so the next Execute() regenerates it.
void Clear() override{
mpDistanceModelPart->Nodes().clear();
mpDistanceModelPart->Conditions().clear();
mpDistanceModelPart->Elements().clear();
// mpDistanceModelPart->GetProcessInfo().clear();
mDistancePartIsInitialized = false;
mpSolvingStrategy->Clear();
mVelocity.clear();
mVelocityOld.clear();
mOldDistance.clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override {
return "LevelSetConvectionProcess";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override {
rOStream << "LevelSetConvectionProcess";
}
/// Print object's data. (Intentionally empty: the process holds no
/// printable state beyond what PrintInfo reports.)
void PrintData(std::ostream& rOStream) const override {
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
ModelPart& mrBaseModelPart;
Model& mrModel;
ModelPart* mpDistanceModelPart;
Variable<double>& mrLevelSetVar;
const double mMaxAllowedCFL;
bool mDistancePartIsInitialized;
const unsigned int mMaxSubsteps;
std::vector< double > mOldDistance;
std::vector< array_1d<double,3> > mVelocity, mVelocityOld;
typename SolvingStrategyType::UniquePointer mpSolvingStrategy;
std::string mAuxModelPartName;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Constructor without linear solver for derived classes.
/// @param rLevelSetVar Nodal scalar variable to be convected
/// @param rBaseModelPart Model part on which the convection is performed
/// @param MaxCFL Maximum allowed CFL number used for the substepping (default 1.0)
/// @param MaxSubSteps User-defined cap on the number of substeps (0 means no cap)
LevelSetConvectionProcess(
    Variable<double> &rLevelSetVar,
    ModelPart &rBaseModelPart,
    const double MaxCFL = 1.0,
    const unsigned int MaxSubSteps = 0)
    : mrBaseModelPart(rBaseModelPart),
      mrModel(rBaseModelPart.GetModel()),
      mpDistanceModelPart(nullptr), // was left uninitialized: dangling until ReGenerateConvectionModelPart runs
      mrLevelSetVar(rLevelSetVar),
      mMaxAllowedCFL(MaxCFL),
      mDistancePartIsInitialized(false), // initialized in the member list instead of the body
      mMaxSubsteps(MaxSubSteps),
      mAuxModelPartName(rBaseModelPart.Name() + "_DistanceConvectionPart")
{
}
/// (Re)generates the auxiliary model part in which the level set convection is
/// solved. Nodes and geometries are SHARED with rBaseModelPart (no copies);
/// only the elements are replaced by LevelSetConvectionElementSimplex
/// instances. On exit the nodal backup arrays are resized and the
/// initialization flag is set.
virtual void ReGenerateConvectionModelPart(ModelPart& rBaseModelPart){
    KRATOS_TRY

    // Drop any previously generated auxiliary part so generation starts from scratch
    if (mrModel.HasModelPart(mAuxModelPartName)) {
        mrModel.DeleteModelPart(mAuxModelPartName);
    }
    mpDistanceModelPart= &(mrModel.CreateModelPart(mAuxModelPartName));

    // Check buffer size (two history steps are required by the convection scheme)
    const auto base_buffer_size = rBaseModelPart.GetBufferSize();
    KRATOS_ERROR_IF(base_buffer_size < 2) <<
        "Base model part buffer size is " << base_buffer_size << ". Set it to a minimum value of 2." << std::endl;

    // Generate
    mpDistanceModelPart->Nodes().clear();
    mpDistanceModelPart->Conditions().clear();
    mpDistanceModelPart->Elements().clear();

    // Share process info, buffer size, properties and tables with the base part
    mpDistanceModelPart->SetProcessInfo(rBaseModelPart.pGetProcessInfo());
    mpDistanceModelPart->SetBufferSize(base_buffer_size);
    mpDistanceModelPart->SetProperties(rBaseModelPart.pProperties());
    mpDistanceModelPart->Tables() = rBaseModelPart.Tables();

    // Assigning the nodes to the new model part
    mpDistanceModelPart->Nodes() = rBaseModelPart.Nodes();

    // Ensure that the nodes have distance as a DOF
    VariableUtils().AddDof< Variable < double> >(mrLevelSetVar, rBaseModelPart);

    // Generating the elements
    mpDistanceModelPart->Elements().reserve(rBaseModelPart.NumberOfElements());
    for (auto it_elem = rBaseModelPart.ElementsBegin(); it_elem != rBaseModelPart.ElementsEnd(); ++it_elem){
        Element::Pointer p_element = Kratos::make_intrusive< LevelSetConvectionElementSimplex < TDim, TDim+1 > >(
            it_elem->Id(),
            it_elem->pGetGeometry(),
            it_elem->pGetProperties());

        // Assign EXACTLY THE SAME GEOMETRY, so that memory is saved!!
        p_element->pGetGeometry() = it_elem->pGetGeometry();

        mpDistanceModelPart->Elements().push_back(p_element);
    }

    // Next is for mpi (but mpi would also imply calling an mpi strategy)
    Communicator::Pointer pComm = rBaseModelPart.GetCommunicator().Create();
    mpDistanceModelPart->SetCommunicator(pComm);

    // Resize the nodal backup arrays used to save/restore the solution state
    const auto n_nodes = mpDistanceModelPart->NumberOfNodes();
    mVelocity.resize(n_nodes);
    mVelocityOld.resize(n_nodes);
    mOldDistance.resize(n_nodes);

    mDistancePartIsInitialized = true;

    KRATOS_CATCH("")
}
/// Estimates the number of convection substeps so that the effective CFL
/// number per substep does not exceed mMaxAllowedCFL.
/// @return Number of substeps (>= 1, capped by mMaxSubsteps when that is set)
unsigned int EvaluateNumberOfSubsteps(){
    // First of all compute the cfl number
    const auto n_elem = mpDistanceModelPart->NumberOfElements();
    const double dt = mpDistanceModelPart->GetProcessInfo()[DELTA_TIME];

    // Vector where each thread will store its maximum (VS does not support OpenMP reduce max)
    int NumThreads = OpenMPUtils::GetNumThreads();
    std::vector<double> list_of_max_local_cfl(NumThreads, 0.0);

    //TODO: Update this loop to avoid using thread id
    // FIX: the original "#pragma omp parallel" (without "for") made every
    // thread sweep the whole element range redundantly; the per-thread max
    // slots make both forms yield the same value, but only "parallel for"
    // actually splits the work.
    #pragma omp parallel for shared(list_of_max_local_cfl)
    for(int i_elem = 0; i_elem < static_cast<int>(n_elem); i_elem++){
        const auto it_elem = mpDistanceModelPart->ElementsBegin() + i_elem;
        Geometry< Node<3> >& r_geom = it_elem->GetGeometry();

        double vol;
        array_1d<double, TDim+1 > N;
        BoundedMatrix<double, TDim+1, TDim > DN_DX;
        GeometryUtils::CalculateGeometryData(r_geom, DN_DX, N, vol);

        int k = OpenMPUtils::ThisThread();
        double& max_cfl = list_of_max_local_cfl[k];

        // Compute the characteristic element size h from the shape function gradients
        double h=0.0;
        for(unsigned int i=0; i<TDim+1; i++){
            double h_inv = 0.0;
            for(unsigned int d=0; d<TDim; d++){ // renamed from "k": it shadowed the thread id above
                h_inv += DN_DX(i,d)*DN_DX(i,d);
            }
            // NOTE(review): h_inv == 0.0 (degenerate geometry) would divide by zero here
            h += 1.0/h_inv;
        }
        h = sqrt(h)/static_cast<double>(TDim+1);

        // Velocity at the integration point (shape-function weighted average of the nodal velocities)
        array_1d<double, 3 > vgauss = ZeroVector(3);
        for(unsigned int i=0; i<TDim+1; i++){
            vgauss += N[i]* r_geom[i].FastGetSolutionStepValue(VELOCITY);
        }

        double cfl_local = norm_2(vgauss) / h;
        if(cfl_local > max_cfl){
            max_cfl = cfl_local;
        }
    }

    // Now we get the maximum at each thread level
    double max_cfl_found = 0.0;
    for (int k=0; k < NumThreads;k++){
        if (max_cfl_found < list_of_max_local_cfl[k]){
            max_cfl_found = list_of_max_local_cfl[k];
        }
    }
    max_cfl_found *= dt;

    // Synchronize maximum CFL between processes
    max_cfl_found = mpDistanceModelPart->GetCommunicator().GetDataCommunicator().MaxAll(max_cfl_found);

    unsigned int n_steps = static_cast<unsigned int>(max_cfl_found / mMaxAllowedCFL);
    if(n_steps < 1){
        n_steps = 1;
    }

    // Never exceed the user-defined maximum number of substeps (0 disables the cap)
    if (mMaxSubsteps > 0 && mMaxSubsteps < n_steps){
        n_steps = mMaxSubsteps;
    }

    return n_steps;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
LevelSetConvectionProcess& operator=(LevelSetConvectionProcess const& rOther);
/// Copy constructor.
//LevelSetConvectionProcess(LevelSetConvectionProcess const& rOther);
///@}
}; // Class LevelSetConvectionProcess
// Avoiding using the macro since this has a template parameter. If there was no template please use the KRATOS_CREATE_LOCAL_FLAG macro
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0));
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1));
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// Input stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::istream& operator >> (
std::istream& rIStream,
LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>& rThis);
/// Output stream function: prints the process info, a newline and then the
/// process data, returning the stream so calls can be chained.
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::ostream& operator << (
    std::ostream& rOStream,
    const LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>& rThis){

    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_LEVELSET_CONVECTION_PROCESS_INCLUDED defined
|
utilityGraphPartitioner.h | // ***********************************************************************
//
// Grappolo: A C++ library for graph clustering
// Mahantesh Halappanavar (hala@pnnl.gov)
// Pacific Northwest National Laboratory
//
// ***********************************************************************
//
// Copyright (2014) Battelle Memorial Institute
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#ifndef _graph_partitioner_
#define _graph_partitioner_
/*
int METIS PartGraphKway(idx_t *nvtxs, idx_t *ncon, idx_t *xadj, idx_t *adjncy,
idx_t *vwgt, idx_t *vsize, idx_t *adjwgt, idx_t *nparts, real_t *tpwgts,
real_t ubvec, idx_t *options, idx_t *objval, idx_t *part)
nvtxs: The number of vertices in the graph.
ncon: The number of balancing constraints. It should be at least 1.
xadj, adjncy: The adjacency structure of the graph as described in Section 5.5.
vwgt (NULL): The weights of the vertices as described in Section 5.5.
vsize (NULL): The size of the vertices for computing the total communication volume as described in Section 5.7.
adjwgt (NULL): The weights of the edges as described in Section 5.5.
nparts The number of parts to partition the graph.
tpwgts (NULL): This is an array of size npartsncon that specifies the desired weight for each partition and constraint.
The target partition weight for the ith partition and jth constraint is specified at tpwgts[i*ncon+j]
(the numbering for both partitions and constraints starts from 0). For each constraint, the sum of the
tpwgts[] entries must be 1.0 (i.e., \Sum_i tpwgts[i*ncon + j] = 1:0).
A NULL value can be passed to indicate that the graph should be equally divided among the partitions.
ubvec (NULL): This is an array of size ncon that specifies the allowed load imbalance tolerance for each constraint.
For the ith partition and jth constraint the allowed weight is the ubvec[j]*tpwgts[i*ncon+j] fraction
of the jth’s constraint total weight. The load imbalances must be greater than 1.0.
A NULL value can be passed indicating that the load imbalance tolerance for each constraint should
be 1.001 (for ncon=1) or 1.01 (for ncon>1).
options (NULL):
This is the array of options as described in Section 5.4.
The following options are valid for METIS PartGraphRecursive:
METIS_OPTION_CTYPE, METIS_OPTION_IPTYPE, METIS_OPTION_RTYPE,
METIS_OPTION_NO2HOP, METIS_OPTION_NCUTS, METIS_OPTION_NITER,
METIS_OPTION_SEED, METIS_OPTION_UFACTOR, METIS_OPTION_NUMBERING,
METIS_OPTION_DBGLVL
The following options are valid for METIS PartGraphKway:
METIS_OPTION_OBJTYPE, METIS_OPTION_CTYPE, METIS_OPTION_IPTYPE,
METIS_OPTION_RTYPE, METIS_OPTION_NO2HOP, METIS_OPTION_NCUTS,
METIS_OPTION_NITER, METIS_OPTION_UFACTOR, METIS_OPTION_MINCONN,
METIS_OPTION_CONTIG, METIS_OPTION_SEED, METIS_OPTION_NUMBERING,
METIS_OPTION_DBGLVL
objval: Upon successful completion, this variable stores the edge-cut or the total communication volume of
the partitioning solution. The value returned depends on the partitioning’s objective function.
part: This is a vector of size nvtxs that upon successful completion stores the partition vector of the graph.
The numbering of this vector starts from either 0 or 1, depending on the value of
options[METIS OPTION NUMBERING].
Returns
METIS OK Indicates that the function returned normally.
METIS ERROR INPUT Indicates an input error.
METIS ERROR MEMORY Indicates that it could not allocate the required memory.
METIS ERROR Indicates some other type of error.
*/
extern "C" {
#include "metis.h"
}
using namespace std;
/*
#ifdef __cplusplus
extern "C" {
#endif
//Multilevel k-way Partitioning
int METIS_PartGraphKway(idx_t *nvtxs, idx_t *ncon, idx_t *xadj, idx_t *adjncy,
idx_t *vwgt, idx_t *vsize, idx_t *adjwgt, idx_t *nparts, real_t *tpwgts,
real_t ubvec, idx_t *options, idx_t *objval, idx_t *part);
#ifdef __cplusplus
}
#endif
*/
//METIS Graph Partitioner:
// Partitions graph *G into numParts parts using METIS_PartGraphKway with the
// edge-cut objective. The 0-based partition id of vertex i is written to
// VertexPartitioning[i]; the caller must provide at least NV entries.
void MetisGraphPartitioner( graph *G, comm_type *VertexPartitioning, int numParts ) {
    printf("Within MetisGraphPartitioner(): \n");
    printf("Number of partitions requested: %d\n", numParts); // was %ld with an int argument (UB)

    //Get the iterators for the graph:
    comm_type NV = G->numVertices;
    comm_type NE = G->numEdges;
    comm_type *vtxPtr = G->edgeListPtrs;
    edge *vtxInd = G->edgeList;
    printf("|V|= %ld, |E|= %ld \n", (long)NV, (long)NE);

    idx_t nvtxs = (idx_t) NV;

    // Build the CSR adjacency structure expected by METIS: NV+1 offsets...
    idx_t *xadj = (idx_t *) malloc ((NV+1) * sizeof(idx_t));
    assert(xadj != 0);
#pragma omp parallel for
    for(comm_type i=0; i<=NV; i++) {
        xadj[i] = (idx_t) vtxPtr[i];
    }

    // ...and 2*NE edge targets (each undirected edge is stored twice)
    idx_t *adjncy = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjncy != 0);
#pragma omp parallel for
    for(comm_type i=0; i<2*NE; i++) {
        adjncy[i] = (idx_t) vtxInd[i].tail;
    }

    // Edge weights, one per stored edge
    idx_t *adjwgt = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjwgt != 0);
#pragma omp parallel for
    for(comm_type i=0; i<2*NE; i++) {
        adjwgt[i] = (idx_t) vtxInd[i].weight;
    }

    idx_t nparts = (idx_t) numParts;
    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions(options);
    options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT; //Edgecut minimization
    options[METIS_OPTION_CTYPE] = METIS_CTYPE_SHEM;    //Sorted heavy-edge matching
    options[METIS_OPTION_NUMBERING]= 0;                //C-style numbering, starting from 0
    //options[METIS_OPTION_NO2HOP]= 0; //Performs a 2-hop matching -- effective for power-law graphs
    options[METIS_OPTION_NSEPS]= 10; //Number of separators computed at each level
                                     //NOTE(review): per the METIS manual this option affects
                                     //nested-dissection ordering, not k-way partitioning -- confirm intent
    //options[METIS_OPTION_UFACTOR] = 30;

    idx_t ncon = 1;   //Number of balancing constraints (at least 1)
    idx_t objval = 0; //Will contain the edgecut (or total communication)
    idx_t *part = (idx_t *) malloc (NV * sizeof(idx_t)); //Partition information
    assert(part != 0);

    // vwgt, vsize, tpwgts and ubvec are NULL: unit vertex weights, equal
    // target partition weights and the default load-imbalance tolerance.
    // (Removed the unused local "real_t ubvec = 1.03;" -- NULL was passed anyway.)
    int returnVal = METIS_PartGraphKway(&nvtxs, &ncon, xadj, adjncy, NULL, NULL, adjwgt,
                                        &nparts, NULL, NULL, options, &objval, part);
    if(returnVal == METIS_OK)
        printf("Edge cut: %ld\n", (long) objval); // idx_t width is build-dependent; cast for a portable format
    else {
        if(returnVal == METIS_ERROR_MEMORY)
            printf("Metis could not allocate memory.\n");
        else
            printf("Metis error: %d\n", returnVal); // was %ld with an int argument (UB)
    }

    // Copy the result back. FIX: part has exactly NV entries; the original
    // loop condition (i<=NV) read and wrote one element past the end of both
    // part[] and VertexPartitioning[].
#pragma omp parallel for
    for(comm_type i=0; i<NV; i++) {
        VertexPartitioning[i] = (comm_type) part[i]; //Do explicit typecasts
    }

    //Cleanup:
    free(xadj); free(adjncy); free(adjwgt);
    free(part);
    printf("Returning back from Metis\n");
}
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
/// Every increment applied through this object is automatically rolled back
/// from the tracked counter when the object goes out of scope.
class TemplateParameterDepthRAII {
  unsigned &TrackedDepth;   // counter owned by the parser
  unsigned LevelsAdded = 0; // amount to roll back on destruction

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth) : TrackedDepth(Depth) {}

  ~TemplateParameterDepthRAII() { TrackedDepth -= LevelsAdded; }

  /// Enter one additional template parameter scope.
  void operator++() {
    ++TrackedDepth;
    ++LevelsAdded;
  }

  /// Enter \p D additional template parameter scopes at once.
  void addDepth(unsigned D) {
    TrackedDepth += D;
    LevelsAdded += D;
  }

  unsigned getDepth() const { return TrackedDepth; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  /// One suspicious '<' occurrence: the candidate template-name expression,
  /// the '<' location, its ranking, and the bracket depths at which it was
  /// recorded (used to decide whether the candidate is still "active").
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;

    /// True while the parser is at exactly the same (), [], {} nesting depth
    /// as when this '<' was recorded.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    /// True while the parser is at the same or a deeper nesting depth.
    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      // Same nesting level: keep the higher-priority candidate.
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio,
                      P.ParenCount, P.BracketCount, P.BraceCount});
    }
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to be
  /// a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
// Trivial accessors forwarding to the parser's collaborators (preprocessor
// and Sema); none of them mutate parser state except via the callee.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }

// Current lookahead token and the semantic analyzer's current scope.
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}

// Forwards to Sema; per the note on ParsingInObjCContainer this may be NULL
// while parsing C-like decls inside an Objective-C container.
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Record where the consumed token was before lexing the next one.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current token iff it has the expected kind. Returns true (and
/// advances, updating PrevTokLocation) on a match; false with no side effects
/// otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.isNot(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}

/// As above, but additionally reports the location of the consumed token
/// through \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (!TryConsumeToken(Expected))
    return false;
  Loc = PrevTokLocation;
  return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  // Balanced delimiters and string literals have dedicated consume methods
  // that keep the corresponding counters / concatenation handling intact.
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();

  // Code-completion tokens are either swallowed explicitly or routed to the
  // generic completion handler, depending on the caller's request.
  if (Tok.is(tok::code_completion)) {
    if (ConsumeCodeCompletionTok)
      return ConsumeCodeCompletionToken();
    return handleUnexpectedCodeCompletionToken();
  }

  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();

  return ConsumeToken();
}
/// Get the location just past the end of the most recently consumed token
/// (i.e. of PrevTokLocation).
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}

/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind. Forwards to Sema.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  return Tok.is(tok::l_paren) || Tok.is(tok::r_paren);
}

/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.is(tok::l_square) || Tok.is(tok::r_square);
}

/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.is(tok::l_brace) || Tok.is(tok::r_brace);
}

/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}

/// isTokenSpecial - True if this token requires special consumption methods
/// (one of the dedicated Consume* entry points rather than ConsumeToken).
bool isTokenSpecial() const {
  if (Tok.isAnnotation() || Tok.is(tok::code_completion))
    return true;
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  // Push the current token back behind Consumed, then re-lex so that
  // Consumed becomes the token under the cursor again.
  Token Next = Tok;
  PP.EnterToken(Consumed);
  PP.Lex(Tok);
  PP.EnterToken(Next);
}

/// Consume the current annotation token. Returns the START location of the
/// annotation, while PrevTokLocation is set to the annotation's END location
/// (annotation tokens cover a source range, unlike ordinary tokens).
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    // Leaving a paren scope discards any pending '<' template-name candidates.
    AngleBrackets.clear(*this);
    --ParenCount;       // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    // Leaving a bracket scope discards any pending '<' template-name candidates.
    AngleBrackets.clear(*this);
    --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    // Leaving a brace scope discards any pending '<' template-name candidates.
    AngleBrackets.clear(*this);
    --BraceCount;       // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  SourceLocation StrLoc = Tok.getLocation();
  PrevTokLocation = StrLoc;
  PP.Lex(Tok);
  return StrLoc;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  SourceLocation CCLoc = Tok.getLocation();
  PrevTokLocation = CCLoc;
  PP.Lex(Tok);
  return CCLoc;
}
/// When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  // Pretend we hit the end of the file so callers unwind out of the parse.
  Tok.setKind(tok::eof);
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N && !Tok.is(tok::eof))
    return PP.LookAhead(N - 1);
  // Asking for the current token, or we're at end-of-file.
  return Tok;
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
// Note: unlike GetLookAheadToken, this does not special-case end-of-file.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
  void *Opaque = Tok.getAnnotationValue();
  return ParsedType::getFromOpaquePtr(Opaque);
}
private:
/// Store a parsed type into an annotation token.
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  void *Opaque = T.getAsOpaquePtr();
  Tok.setAnnotationValue(Opaque);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  void *Opaque = Tok.getAnnotationValue();
  return ExprResult::getFromOpaquePointer(Opaque);
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  void *Opaque = ER.getAsOpaquePointer();
  Tok.setAnnotationValue(Opaque);
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  // Only relevant when AltiVec or ZVector is enabled.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  // 'pixel' is only context-sensitive under AltiVec proper.
  IdentifierInfo *II = Tok.getIdentifierInfo();
  bool MayBeVectorKeyword =
      II == Ident_vector || II == Ident_bool ||
      (getLangOpts().AltiVec && II == Ident_pixel);
  if (!MayBeVectorKeyword)
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  bool Enabled = getLangOpts().AltiVec || getLangOpts().ZVector;
  if (!Enabled || Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC1);
// Annotation tokens carry no identifier info; check before querying.
if (Tok.isAnnotation())
return false;
// Lazily look up and cache the 'instancetype' identifier.
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
// Parser state snapshotted at construction, restored by Revert().
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
// True until Commit() or Revert() has been called; checked in the dtor.
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
// Snapshot the current token, tentative-identifier count, and delimiter
// counts, then have the preprocessor start caching tokens so we can
// backtrack to this position.
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
// Keep the consumed tokens, but drop identifiers tentatively declared
// since the snapshot.
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
// Rewind the preprocessor and restore every piece of captured state.
P.PP.Backtrack();
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
// Unconditional rewind satisfies the base's Commit-or-Revert contract.
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
// The Objective-C container being temporarily exited, or null when the
// parser is not inside one (in which case both hooks below are skipped).
Decl *DC;
// Sets ParsingInObjCContainer to whether DC is non-null for the lifetime
// of this object, restoring the previous value on destruction.
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
// Re-enter the container that was exited in the constructor.
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
// No scope was entered: null out Self so Exit()/~ParseScope are no-ops.
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Report a diagnostic at the current token's location.
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// Combine two SkipUntilFlags sets.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return SkipUntilFlags(unsigned(L) | unsigned(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Single-kind form: delegate to the ArrayRef overload.
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind Kinds[] = {T1, T2};
  return SkipUntil(Kinds, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind Kinds[] = {T1, T2, T3};
  return SkipUntil(Kinds, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
// The cached tokens that make up the attribute's argument list.
CachedTokens Toks;
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
// Declarations this attribute will be applied to once parsed.
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
// Record an additional declaration the attribute applies to.
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
  /// Whether these attributes should be parsed shortly after creation
  /// rather than late, after the enclosing class is complete.
  // const-qualified: a pure accessor, so it is callable on const lists.
  bool parseSoon() const { return ParseSoon; }
private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration.
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  ParsingClass *Top = ClassStack.top();
  return *Top;
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
// True once Pop() has been called; prevents double-popping in the dtor.
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
// Pop automatically unless Pop() was already called explicitly.
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Consume and store tokens until \p T1 is found.  Single-kind convenience
/// form; delegates to the two-kind overload with both kinds equal.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// A set of parsed attributes paired with the source range covering them.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
// Clear the attribute list and reset the range to a default-constructed
// (invalid) SourceRange.
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
/// A non-owning view of parsed attributes paired with the source range
/// covering them.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
// Clear only the view's list and reset the range; underlying attributes
// are not destroyed.
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII state for parsing an Objective-C @implementation.  Registers itself
/// as the parser's current implementation on construction and collects
/// method bodies whose parsing is deferred.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      // Initialize all scalar members in the init list (previously
      // 'Finished' was assigned in the constructor body).
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    // Make this the parser's active @implementation record.
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();
  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }
private:
  // Completion flag; initialized false, queried via isFinished().
  // NOTE(review): presumably set by finish(), which is defined out of line.
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
// Parse an identifier appearing inside a Microsoft-style inline asm block.
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
// Parse the right-hand side of a binary expression at or above MinPrec.
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// If there is a potential angle bracket currently being tracked, check
/// OpToken against it; otherwise there is nothing to do.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(
SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> Completer = llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
// Returns true if tokens A and B have no whitespace between them.
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
///
/// Dispatches to the brace-initializer parser when the next token is '{';
/// otherwise parses a plain assignment-expression.
ExprResult ParseInitializer() {
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
/// Which constructs may appear at the current statement position.
enum AllowedConstructsKind {
/// Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// Allow statements and all executable OpenMP directives
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior this __if_exists or __if_not_exists block
/// should follow.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts that consist of only a type-specifier(-seq).
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;
  // Contexts that permit full declaration specifiers.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts where deduction is not permitted.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  // Everything else allows class template argument deduction.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; // Location of the ':' (invalid if none was seen).
  ExprResult RangeExpr;    // The range expression following the ':'.
  /// A ':' was seen, so this really is a for-range-declaration.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
// Map a declarator context to the decl-specifier context used for it.
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
// FieldsCallback is invoked for each field declarator parsed.
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationSpecifier() == TPResult::True
             : isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks if the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume that the type-id
/// is parenthesized.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool IsAmbiguous;
  return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
/// The possible outcomes of disambiguating an if/switch condition.
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
// Convenience overload that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression, \c
/// TPResult::False if this token starts a type-specifier-seq, or \c
/// TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
// Parse a type-name (type-id in C++), optionally reporting its source range.
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' can begin an attribute-specifier-seq.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
// Diagnose a C++11 attribute found where attributes are prohibited
// (see CheckProhibitedCXX11Attribute).
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // A misplaced attribute here is either '[[' or 'alignas'.
  bool AtDoubleSquare = Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (AtDoubleSquare || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
// Diagnose a misplaced C++11 attribute, suggesting CorrectLocation.
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
// Emit the diagnostic for attributes in a prohibited location.
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't support yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If the current token is '__attribute__', parse the GNU attribute list
/// and attach it to the declarator; otherwise do nothing.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes attrs(AttrFactory);
  SourceLocation endLoc;
  ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
  D.takeAttributes(attrs, endLoc);
}
/// If the current token starts a GNU __attribute__ specifier, parse the
/// attribute list directly into \p attrs.
/// \param endLoc if non-null, receives the end location of the attributes.
/// \param LateAttrs optional sink for attributes whose parsing is deferred.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If standard attributes are allowed and a C++11 attribute-specifier
/// begins here, parse it and attach the attributes to the declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange cxx11Attrs(AttrFactory);
  SourceLocation attrsEnd;
  ParseCXX11Attributes(cxx11Attrs, &attrsEnd);
  D.takeAttributes(cxx11Attrs, attrsEnd);
}
/// If standard attributes are allowed and a C++11 attribute-specifier
/// begins here, parse it and merge the result into \p attrs.
/// \param endLoc if non-null, receives the end location of the attributes.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  // Parse into a ranged list first, then hand everything over to the caller.
  ParsedAttributesWithRange ranged(AttrFactory);
  ParseCXX11Attributes(ranged, endLoc);
  attrs.takeAllFrom(ranged);
}
/// If standard attributes are allowed and a C++11 attribute-specifier
/// begins here, parse it directly into the ranged list \p attrs.
/// \param OuterMightBeMessageSend true when '[' might instead begin an
/// Objective-C message send; passed through to the disambiguation check.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return;
  if (isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// If Microsoft extensions are enabled and the current token is '[',
/// parse a Microsoft attribute block into \p attrs.
/// \param endLoc if non-null, receives the end location of the attributes.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt)
    return;
  if (Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// If the __declspec keyword is enabled and present, parse the declspec
/// attribute list into \p Attrs.
/// \param End if non-null, receives the end location of the declspecs.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
/// Parses the opencl_unroll_hint attribute when compiling OpenCL.
/// \return false if an error happens; true otherwise (including the
/// non-OpenCL case, where nothing is parsed).
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload: classify the parser's current token as a C++11
/// virt-specifier by forwarding to the single-token overload.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;  // Nested-name-specifier naming the scope to enter.
  bool EnteredScope; // True only if Sema accepted the declarator scope.
  bool CreatedScope; // True once the parser-side scope has been pushed.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  // Push a parser scope, then ask Sema to enter the declarator's C++ scope.
  // EnteredScope is set only when Sema succeeds (ActOnCXXEnterDeclaratorScope
  // returns false), so the destructor knows whether a matching
  // ActOnCXXExitDeclaratorScope call is required.
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    // Leave the semantic declarator scope first, then pop the parser scope;
    // this mirrors the order in which they were entered.
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
std::vector<IdentifierInfo *> &Ident,
std::vector<SourceLocation> &NamespaceLoc,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Holds the pieces of a single declarator in a using-declaration:
/// an optional 'typename' keyword, a nested-name-specifier, the declared
/// name, and an optional trailing ellipsis.
struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;

  /// Reset every component to its empty/invalid state so this object can be
  /// reused for the next declarator in the list.
  void clear() {
    TypenameLoc = SourceLocation();
    EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
  // Optional expression accompanying the variable list; null when absent.
  Expr *TailExpr = nullptr;
  // Location of the ':' separator, when one was parsed.
  SourceLocation ColonLoc;
  // NOTE(review): presumably the location of the closing paren of the
  // clause — confirm against ParseOpenMPVarList.
  SourceLocation RLoc;
  // Scope qualifier and name of a reduction identifier, for reduction
  // clauses.
  CXXScopeSpec ReductionIdScopeSpec;
  DeclarationNameInfo ReductionId;
  // Kind modifiers for 'depend', 'linear', and 'map' clauses; each starts
  // at its "unknown"/default value until the modifier is parsed.
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
  OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
  OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  bool IsMapTypeImplicit = false;
  // NOTE(review): appears to record where the depend/linear/map kind was
  // written — verify at the use sites.
  SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependent
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPClause;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
class CXXThisExpr; // HLSL Change
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// Which kind of pointer declarator we saw.
  /// Default-initialized to 0 so a default-constructed record (such as the
  /// one embedded in FileNullabilityMap's single-element cache, or a fresh
  /// DenseMap entry) never holds an indeterminate value.
  uint8_t PointerKind = 0;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (creating if necessary) the nullability record for \p file.
  ///
  /// The returned reference aliases the single-element cache, so it is
  /// invalidated by the next lookup for a different file.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.  DenseMap::operator[] default-
    // constructs an entry for files not seen before, so an unseen file
    // yields a fresh FileNullability record here.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Decide whether the new declaration \p New may be linked against the
/// possibly module-hidden prior declaration \p Old.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // Outside of -fmodules-hide-internal-linkage there is nothing to check.
  if (!getLangOpts().ModulesHideInternalLinkage)
    return true;
  // We are about to link these. It is now safe to compute the linkage of
  // the new decl. If the new decl has external linkage, we will
  // link it with the hidden decl (which also has external linkage) and
  // it will keep having external linkage. If it has internal linkage, we
  // will not link it. Since it has no previous decls, it will remain
  // with internal linkage.
  return isVisible(Old) || New->isExternallyVisible();
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
// HLSL Change Begin
// The HLSL rewriter doesn't define a default matrix pack,
// so we must preserve the lack of annotations to avoid changing semantics.
bool HasDefaultMatrixPack = false;
// Uses of #pragma pack_matrix change the default pack.
bool DefaultMatrixPackRowMajor = false;
// HLSL Change End.
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// \brief Generic stack of values controlled by MS-style #pragma
/// directives (used below for #pragma data_seg / bss_seg / const_seg /
/// code_seg).
///
/// Holds the currently active value plus a stack of saved slots; the
/// stack is manipulated by Act() according to a PragmaMsStackAction
/// verb (push/pop optionally keyed by a string label).
template<typename ValueType>
struct PragmaStack {
/// One saved entry: the optional label given to push, the value that
/// was saved, and the location of the pragma that pushed it.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
/// Apply one pragma action to this stack (defined out of line).
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
/// Construct with the initial (default) current value.
explicit PragmaStack(const ValueType &Value)
: CurrentValue(Value) {}
SmallVector<Slot, 2> Stack;
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// \brief Install the parser callbacks used to late-parse templated
/// function bodies (e.g. under -fdelayed-template-parsing), along with
/// the opaque parser pointer passed back to them.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
/// \brief Opaque token returned by DelayedDiagnostics::push*() and
/// consumed by the matching pop*(); it records the pool that was
/// active before the push. Only DelayedDiagnostics can read it.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// \brief The current pool of diagnostics into which delayed
  /// diagnostics should go. Null when diagnostics are not delayed.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic to the current pool.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  /// Const-qualified: this is a pure query with no side effects, matching
  /// getCurrentPool() below, and so is callable on a const Sema.
  bool shouldDelayDiagnostics() const { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool, or null if
  /// diagnostics are not currently being delayed.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope. Access and deprecation diagnostics will be
  /// collected in this pool. Returns a token for the matching pop.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics. This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
// Null once pop() has run; used to make pop() idempotent.
DeclContext *SavedContext;
// Saved delayed-diagnostics state from pushUndelayed().
ProcessingContextState SavedContextState;
// Saved 'this' type override, restored on pop.
QualType SavedCXXThisTypeOverride;
public:
/// Push \p ContextToPush as the current DeclContext, suspending
/// delayed diagnostics for the duration. If \p NewThisContext is
/// true, also clear any CXXThisTypeOverride for the new context.
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
/// Restore the saved context/state early. Safe to call more than
/// once; subsequent calls (and the destructor) are no-ops.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
// Pushes DC as the current DeclContext for the scope's lifetime.
Sema::ContextRAII SavedContext;
public:
/// Enter \p DC and push a fresh function scope plus a
/// PotentiallyEvaluated expression-evaluation context, as needed to
/// synthesize a function body (e.g. an implicit special member).
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
// Pops in reverse order; SavedContext's destructor then restores the
// previous DeclContext.
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
// std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
// Saved copy of Sema::MaybeODRUseExprs from the enclosing context.
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
/// Construct a record for a newly entered context. Note the
/// constructor parameter order differs from the member declaration
/// order; members are nonetheless initialized in declaration order.
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
/// True for the two unevaluated-operand contexts.
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
/// Outcome of overload resolution for the special member.
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
// The resolved method (if any) with the Kind packed into the low
// 2 bits of the pointer; all three Kind values fit in 2 bits.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
/// Capture the current FP_CONTRACT state; the destructor restores it.
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
// NOTE(review): this RAII type is implicitly copyable; copying would
// restore the state twice. Presumably harmless here since the
// restore is idempotent, but worth confirming if usage changes.
Sema& S;
bool OldFPContractState : 1;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
// Trivial accessors for the options, engines, and consumers this Sema
// instance was constructed with.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
/// Wrap an already-started DiagnosticBuilder so that emission is
/// routed through Sema::EmitCurrentDiagnostic on destruction.
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
/// Forwards the value to the base DiagnosticBuilder, then returns the
/// derived reference so further << stay on SemaDiagnosticBuilder.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
/// \brief Emit a diagnostic at \p Loc, wrapped so that template
/// instantiation notes can be attached when the builder is destroyed.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
/// \brief Retrieve the innermost function/block/lambda scope.
/// FunctionScopes is documented above as never empty, so back() is
/// always valid (the first element is the cached top-level scope).
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
/// \brief Retrieve the innermost enclosing scope that is not a block
/// scope, walking from the innermost scope outward; returns null when
/// there are no function scopes at all.
sema::FunctionScopeInfo *getEnclosingFunction() const {
  for (auto It = FunctionScopes.rbegin(), End = FunctionScopes.rend();
       It != End; ++It) {
    // Skip over block scopes; the first non-block scope wins.
    if (!isa<sema::BlockScopeInfo>(*It))
      return *It;
  }
  return nullptr;
}
/// \brief Record a use of a __weak object in the current function
/// scope (for -Warc-repeated-use-of-weak), unless we are in an
/// unevaluated context where no use actually occurs.
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// \brief Return the DQ_PR_weak property qualifier bit when the type
/// is weak either under ObjC GC (__weak GC pointer) or under ARC
/// (__weak ownership); otherwise return 0.
unsigned deduceWeakPropertyFromType(QualType T) {
  const bool IsGCWeak =
      getLangOpts().getGC() != LangOptions::NonGC && T.isObjCGCWeak();
  const bool IsARCWeak =
      getLangOpts().ObjCAutoRefCount &&
      T.getObjCLifetime() == Qualifiers::OCL_Weak;
  return (IsGCWeak || IsARCWeak) ? ObjCDeclSpec::DQ_PR_weak : 0;
}
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
// HLSL Change - FIX - We should move param mods to parameter QualTypes
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI,
ArrayRef<hlsl::ParameterModifier> ParamMods);
// HLSL Change - End
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  /// When true, diagnose() is expected to do nothing (see BoundTypeDiagnoser,
  /// which sets this for a diagnostic ID of 0).
  bool Suppressed;
  TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
  /// Emit the diagnostic for the incomplete type \p T at location \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};
// getPrintable overloads: normalize the heterogeneous argument types that can
// be bound into a diagnostic (see BoundTypeDiagnoser) into forms that can be
// streamed into a diagnostic builder with '<<'.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A lone SourceLocation is widened to a (single-point) SourceRange.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that captures a diagnostic ID plus a pack of extra
/// arguments (held by reference) and replays them into the diagnostic when
/// diagnose() is invoked. A DiagID of 0 suppresses the diagnostic entirely
/// (via TypeDiagnoser::Suppressed).
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  // Bound arguments; references must outlive this diagnoser.
  std::tuple<const Ts &...> Args;
  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    // The braced-init-list guarantees left-to-right evaluation of the pack
    // expansion; the Dummy array only exists to provide that context.
    bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }
public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}
  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    if (Suppressed)
      return;
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    // The offending type is always streamed last, after the bound arguments.
    DB << T;
  }
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
/// \brief Whether module \p M is in the currently-visible module set.
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: declarations that are not hidden are always visible.
  if (!D->isHidden())
    return true;
  // Otherwise fall back to the full module-visibility computation.
  return isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
/// Variadic convenience wrapper: bind \p DiagID and the extra diagnostic
/// arguments into a BoundTypeDiagnoser, then delegate to the
/// TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Binder(DiagID, Args...);
  return RequireCompleteType(Loc, T, Binder);
}
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
/// Variadic convenience wrapper: bind \p DiagID and the extra diagnostic
/// arguments into a BoundTypeDiagnoser, then delegate to the
/// TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Binder(DiagID, Args...);
  return RequireCompleteExprType(E, Binder);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
/// Variadic convenience wrapper: bind \p DiagID and the extra diagnostic
/// arguments into a BoundTypeDiagnoser, then delegate to the
/// TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Binder(DiagID, Args...);
  return RequireLiteralType(Loc, T, Binder);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,             // Lookup produced no usable classification.
  NC_Error,               // Classification failed with an error.
  NC_Keyword,             // The identifier is a keyword in this context.
  NC_Type,                // The name denotes a type.
  NC_Expression,          // The name resolved to an expression.
  NC_NestedNameSpecifier, // The name is a nested-name-specifier prefix.
  NC_TypeTemplate,        // The name is a type (class/alias) template.
  NC_VarTemplate,         // The name is a variable template.
  NC_FunctionTemplate     // The name is a function template.
};
/// Tagged result of ClassifyName(): a NameClassificationKind plus the payload
/// appropriate to that kind (an expression, a type, or a template name).
/// The implicit converting constructors allow callers to return an
/// ExprResult, ParsedType, or IdentifierInfo* directly; the remaining kinds
/// are produced via the named factory functions below.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;         // Valid only when Kind == NC_Expression.
  TemplateName Template;   // Valid only for the *_Template kinds.
  ParsedType Type;         // Valid only when Kind == NC_Type.
  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
  // The identifier itself is not stored; only the fact that it is a keyword.
  NameClassification(const IdentifierInfo *)
      : Kind(NC_Keyword) {
  }
  static NameClassification Error() {
    return NameClassification(NC_Error);
  }
  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }
  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }
  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }
  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }
  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }
  NameClassificationKind getKind() const { return Kind; }
  /// The type payload; asserts that this classification is NC_Type.
  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }
  /// The expression payload; asserts that this is NC_Expression.
  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }
  /// The template payload; asserts that this is one of the template kinds.
  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate);
    return Template;
  }
  /// Map the template classification kinds to the parser-facing
  /// TemplateNameKind; invalid for non-template classifications.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
// HLSL Change Starts
// This enumeration is used to determine whether a variable declaration
// should shadow a prior declaration rather than merging.
enum ShadowMergeState {
ShadowMergeState_Disallowed, // shadowing is not allowed
ShadowMergeState_Possible, // shadowing is possible (but may not occur)
ShadowMergeState_Effective // the declaration should shadow a prior one
};
// HLSL Change Ends
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition =
nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Null-safe test for whether \p D is an Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parsed has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
  // Delegate to the static overload using this Sema's context/preprocessor.
  return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
/// Result of the "should we skip this definition body?" queries (see
/// shouldSkipAnonEnumBody and the SkipBody parameter of ActOnTag).
struct SkipBodyInfo {
  SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
  bool ShouldSkip;     // True when the parser may skip the body.
  // NOTE(review): presumably the prior declaration that makes skipping
  // possible — confirm against the callers that populate this.
  NamedDecl *Previous;
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
/// (Passed to mergeDeclAttributes below, whose default is AMK_Redeclaration.)
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,        ///< A simple assignment.
AA_Passing,          ///< Passing a function argument.
AA_Returning,        ///< Returning a value from a function.
AA_Converting,       ///< A conversion.
AA_Initializing,     ///< An initialization.
AA_Sending,          ///< Sending an Objective-C message.
AA_Casting,          ///< A cast.
AA_Passing_CFAudited ///< Passing to a parameter in a CF-audited context.
};
/// C++ Overloading.
/// Classifies the outcome of overload checking. (Returned by CheckOverload.)
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
/// (Passed to the CheckConvertedConstantExpression overloads below.)
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// Suppress all diagnostics from this converter.
// NOTE(review): semantics inferred from the name and constructor defaults;
// confirm against PerformContextualImplicitConversion.
bool Suppress;
// Suppress only the diagnostic about the conversion itself.
// NOTE(review): inferred from the name; confirm at use sites.
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
// Virtual destructor: concrete diagnosers are destroyed through this base.
virtual ~ContextualImplicitConverter() {}
};
/// Contextual converter whose destination types are integral and (optionally
/// scoped) enumeration types, as used for ICE-style contextual conversions.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether scoped enumeration types are acceptable destination types.
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
// Route the generic "no match" diagnostic to the more specific
// "not an integer" diagnostic supplied by subclasses.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// Classifies the kind of Objective-C object subscripting.
/// (Returned by CheckSubscriptingKind below.)
enum ObjCSubscriptKind {
OS_Array,      ///< Array-style subscripting.
OS_Dictionary, ///< Dictionary-style subscripting.
OS_Error       ///< The subscript expression could not be classified.
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
/// Classifies an Objective-C literal expression.
/// (Returned by CheckLiteralKind below.)
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,      ///< An array literal.
LK_Dictionary, ///< A dictionary literal.
LK_Numeric,    ///< A numeric literal.
LK_Boxed,      ///< A boxed expression.
LK_String,     ///< A string literal.
LK_Block,      ///< A block.
LK_None        ///< Not a recognized literal kind.
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all template and non-templates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop. (Returned by BuildForRangeBeginEndCall below.)
enum ForRangeStatus {
FRS_Success,          ///< The call was built successfully.
FRS_NoViableFunction, ///< No viable function was found.
FRS_DiagnosticIssued  ///< A diagnostic has already been emitted.
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin, ///< The call is to begin().
BEF_end    ///< The call is to end().
};
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
/// (Consumed by LookupName, LookupSingleName, LookupVisibleDecls, and the
/// CorrectTypo family of entry points below.)
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
/// (Passed to LookupSingleName and LookupProtocol below.)
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
/// (Returned by LookupLiteralOperator below.)
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// \brief Per-TypoExpr bookkeeping: the consumer that produces candidate
/// corrections plus the callbacks used to diagnose and recover from the typo.
struct TypoExprState {
// Produces candidate corrections for the typo.
std::unique_ptr<TypoCorrectionConsumer> Consumer;
// Callback used to emit the diagnostic for a chosen correction.
TypoDiagnosticGenerator DiagHandler;
// Callback used to build the recovery expression from a chosen correction.
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
// Move-only: holding a unique_ptr implicitly disables copying.
TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
/// Flags how typo correction is being used.
/// (Passed to CorrectTypo and CorrectTypoDelayed below.)
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
// Convenience overload: correct typos in \p E with a custom filter when
// there is no initialized variable to exclude from correction.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
// Convenience overload taking an ExprResult: invalid results are passed
// through unchanged; otherwise typo correction runs on the wrapped Expr.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
// Fix: forward InitDecl to the Expr* overload. The previous call,
// CorrectDelayedTyposInExpr(ER.get(), Filter), resolved to the
// (Expr *, Filter) overload, which substitutes nullptr for InitDecl —
// the InitDecl argument was silently dropped, so the variable being
// initialized was never excluded from typo correction.
return ER.isInvalid() ? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
// Convenience overload: ExprResult input with a custom filter and no
// initialized variable to exclude from correction.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief - Returns instance or factory methods in global method pool for
/// given selector. If no such method or only one method found, function returns
/// false; otherwise, it returns true
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool instance);
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R,
bool receiverIdOrClass);
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance);
/// \brief Record the typo correction failure and return an empty correction.
///
/// \param Typo The identifier for which typo correction failed.
/// \param TypoLoc The location at which the failed correction was attempted.
/// \param RecordFailure If true (the default), remember \p TypoLoc in
/// TypoCorrectionFailures so this failure can be consulted later.
/// \returns A default-constructed (empty) TypoCorrection.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
///
/// Thin wrapper over AddMethodToGlobalPool with instance = true.
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool, but
/// for factory (class) methods; forwards with instance = false.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for
/// \p Sel from the global method pool and warns if there are multiple
/// signatures. Thin wrapper over LookupMethodInGlobalPool with
/// instance = true.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory (class) method for
/// \p Sel from the global method pool and warns if there are multiple
/// signatures. Thin wrapper over LookupMethodInGlobalPool with
/// instance = false.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// \brief Opaque wrapper around an expression that has been finished as a
/// "full expression"; constructed by Sema::MakeFullExpr and consumed by the
/// statement-building callbacks below. The wrapped expression may be null.
class FullExprArg {
public:
// NOTE(review): the Sema parameter is unused; this form simply produces a
// null FullExprArg.
FullExprArg(Sema &actions) : E(nullptr) { }
// Returns the wrapped expression as an ExprResult. Despite the name, the
// stored pointer is not cleared.
ExprResult release() {
return E;
}
// Direct access to the wrapped expression (may be null).
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
// The wrapped full expression; null when default-constructed.
Expr *E;
};
/// Build a FullExprArg for \p Arg, using the expression's own location for
/// diagnostics (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// Build a FullExprArg for \p Arg, finishing it as a full expression at
/// location \p CC via ActOnFinishFullExpr.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
/// Build a FullExprArg for \p Arg, finishing it as a full expression whose
/// value is discarded (e.g. an expression statement).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
///
/// Calls Sema::ActOnStartOfCompoundStmt on construction and
/// Sema::ActOnFinishOfCompoundStmt on destruction, guaranteeing the scope is
/// closed on every exit path.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
// The Sema instance whose compound-statement scope is managed.
Sema &S;
};
/// An RAII helper that pops a function scope on exit.
///
/// Unless disable() has been called, the destructor invokes
/// Sema::PopFunctionScopeInfo.
struct FunctionScopeRAII {
Sema &S;
// When false, the destructor is a no-op (see disable()).
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
// Cancel the scheduled pop, e.g. when the scope was already popped.
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Begin parsing a declaration, routing delayed diagnostics into \p pool.
/// \returns The state to hand back to PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Begin parsing a class, suppressing delaying of diagnostics.
/// \returns The state to hand back to PopParsingClass.
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
/// Finish parsing a class, restoring the state saved by PushParsingClass.
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
// HLSL Change Begins
bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc,
UnaryExprOrTypeTrait ExprKind);
// HLSL Change Ends
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
// The scope in which the member access appears.
Scope *S;
// The member name being accessed.
UnqualifiedId &Id;
// The Objective-C implementation declaration context, if any.
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
// One component of a __builtin_offsetof designator: either a member
// designator ".identifier" or an array-subscript designator "[expr]".
struct OffsetOfComponent {
// Source range covered by this single component.
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo; // active when !isBrackets (".ident" form)
Expr *E;                   // active when isBrackets ("[expr]" form)
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
/// Returned by the CheckMicrosoftIfExistsSymbol overloads (declared
/// below) when handling the Microsoft __if_exists / __if_not_exists
/// extension.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
// HLSL Change Starts
//===---------------------------- HLSL Features -------------------------===//
/// cbuffer/tbuffer
llvm::SmallVector<Decl*, 1> HLSLBuffers;
Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc,
IdentifierInfo *Ident, SourceLocation IdentLoc,
std::vector<hlsl::UnusualAnnotation *>& BufferAttributes,
SourceLocation LBrace);
void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
Decl* getActiveHLSLBuffer() const;
void ActOnStartHLSLBufferView();
bool IsOnHLSLBufferView();
Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
DeclGroupPtrTy &dcl, bool iscbuf);
// HLSL Change Ends
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Canonical exception types recorded so far; presumably used by
// CalledDecl/CalledExpr (defined elsewhere) to keep Exceptions free
// of duplicates -- NOTE(review): the insertion logic is not visible
// here, confirm in the implementation.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// The collected dynamic (throw(...)) exception list.
SmallVector<QualType, 4> Exceptions;
// Drop all collected exception types (both the set and the list).
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from the most restrictive specification available in the
// dialect: EST_BasicNoexcept in C++11, otherwise an empty dynamic
// specification, throw() (EST_DynamicNone).
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
// Dynamic specification: attach the collected exception list.
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
// Encode "can throw anything" as noexcept(false) by synthesizing a
// 'false' boolean literal as the noexcept operand.
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
// Saved value of S.CXXThisTypeOverride to restore when the scope
// ends -- NOTE(review): restore-on-destruction inferred from the
// RAII naming; the constructor/destructor bodies are defined
// elsewhere.
QualType OldCXXThisTypeOverride;
// Whether this scope actually installed an override; when false the
// destructor presumably leaves the override untouched -- NOTE(review):
// assumed from the 'Enabled' constructor parameter, confirm in the
// implementation.
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addRestrictAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
bool ByRef, IdentifierInfo *Id, Expr *&Init);
/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType, IdentifierInfo *Id, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
/// Build a member call that invokes the conversion function \p Method
/// on \p Exp.  \p FoundDecl is the declaration found by lookup for
/// access/diagnostic purposes; \p HadMultipleCandidates records whether
/// overload resolution saw more than one candidate.
/// NOTE(review): declared here among the ObjC literal builders, but its
/// signature is pure C++ — presumably just placement; confirm against
/// the definition.
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXMemberDefaultArgs(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Result of a C++ access-control check (returned by the
/// Check*Access methods declared below).
enum AccessResult {
AR_accessible,   ///< The access is permitted.
AR_inaccessible, ///< The access is not permitted.
AR_dependent,    ///< Presumably the check is template-dependent and
                 ///< cannot be resolved yet — confirm in the definition.
AR_delayed       ///< Presumably the check was deferred for later
                 ///< processing (see HandleDelayedAccessCheck) — confirm.
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects which kind of entity is mentioned when diagnosing a use of
/// an abstract class type; passed as \c SelID to the
/// RequireNonAbstractType overload declared below.
enum AbstractDiagSelID {
AbstractNone = -1,           ///< No particular context to mention.
AbstractReturnType,          ///< Abstract type used as a return type.
AbstractParamType,           ///< Abstract type used as a parameter type.
AbstractVariableType,        ///< Abstract type used as a variable's type.
AbstractFieldType,           ///< Abstract type used as a field's type.
AbstractIvarType,            ///< Abstract type used as an ObjC ivar type.
AbstractSynthesizedIvarType, ///< Abstract type of a synthesized ObjC ivar.
AbstractArrayType            ///< Abstract type used as an array element type.
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Require that \p T not be an abstract class type, emitting diagnostic
/// \p DiagID (formatted with \p Args) at \p Loc when it is.
///
/// Convenience wrapper: bundles the diagnostic ID and its arguments
/// into a BoundTypeDiagnoser and forwards to the TypeDiagnoser-based
/// overload declared above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> D(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, D);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
bool NextIsLess = false); // HLSL Change - additional special case flag
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool NextIsLess = false); // HLSL Change - additional special case flag
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
/// \brief Build a template-id expression (e.g., 'f<int>') from the
/// declarations found by name lookup and the given template arguments.
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
/// \brief Build a qualified template-id expression (e.g., 'N::f<int>')
/// from a qualified name and template arguments.
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
/// \brief Form a template name from a dependent name (e.g.,
/// 'T::template apply'), returning the kind of template name produced
/// in addition to the name itself (via \p Template).
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
/// \brief Process a class template specialization or partial
/// specialization (e.g., 'template<> struct X<int>') named by the
/// annotated template-id.
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
/// \brief Process a declarator appearing inside a template declaration.
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
/// \brief Begin the definition of a function template, entering its body
/// scope.
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
/// \brief Diagnose invalid combinations of an explicit specialization or
/// instantiation with a prior declaration \p PrevDecl. \p SuppressNew is
/// set when the new declaration should be discarded rather than acted on.
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
/// \brief Check a function template specialization whose resolution must
/// be deferred because it is dependent.
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
/// \brief Check whether \p FD is a valid explicit specialization of one
/// of the function templates found in \p Previous.
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
/// \brief Check a member specialization of a class template member
/// (e.g., 'template<> void X<int>::f()').
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
/// \brief Process an explicit instantiation of a class template
/// specialization named by a template-id.
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
/// \brief Process an explicit instantiation of a class named by an
/// identifier (rather than a template-id).
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
/// \brief Process an explicit instantiation of an entity described by a
/// declarator (e.g., a function template or static data member).
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
/// \brief Substitute into the default argument of template parameter
/// \p Param, if one is available. \p HasDefaultArg reports whether the
/// parameter actually had a default argument.
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
/// \brief Check a single template argument against its corresponding
/// template parameter \p Param, appending the converted argument to
/// \p Converted on success.
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
/// \brief Check a template argument against a template type parameter.
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
/// \brief Check the type \p Arg as an argument for template type
/// parameter \p Param.
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
/// \brief Check the expression \p Arg as an argument for non-type
/// template parameter \p Param, producing the converted argument in
/// \p Converted.
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check \p Arg as an argument for template template parameter
/// \p Param.
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
/// \brief Build an expression from a declaration-valued template
/// argument of type \p ParamType.
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
/// \brief Build an expression from an integral template argument.
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
/// \brief Determine whether the parameter lists \p New and \p Old are
/// equivalent under the matching rules selected by \p Kind, optionally
/// emitting diagnostics when \p Complain is true.
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
/// \brief Check that a template declaration is permitted in the scope
/// \p S.
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
/// \brief Build the type named by a typename specifier
/// ('keyword qualifier::II'), resolving it if possible.
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
/// \brief Rebuild the type \p T within the context of the current
/// instantiation.
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
/// \brief Rebuild the nested-name-specifier \p SS within the current
/// instantiation.
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
/// \brief Rebuild the expression \p E within the current instantiation.
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
/// \brief Rebuild a template parameter list within the current
/// instantiation.
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
/// \brief Produce a human-readable description of the bindings of
/// template parameters in \p Params to the arguments in \p Args.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
/// \brief Produce a human-readable description of the bindings of
/// template parameters in \p Params to the first \p NumArgs arguments.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded Receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded Receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded Receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded Receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded Receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded Receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion expression from the pattern of the
/// pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param TemplateArgs The template arguments available for substitution
/// into the parameter packs.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// \brief Adjust the calling convention and noreturn attribute of
/// \p ArgFunctionType to match \p FunctionType.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
/// \brief Deduce template arguments for a class template partial
/// specialization from the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// \brief Deduce template arguments for a variable template partial
/// specialization from the given list of template arguments.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// \brief Substitute explicitly-specified template arguments into a
/// function template, producing the resulting parameter types (and,
/// optionally, the full function type).
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
// Parameter type that deduction was performed against.
QualType OriginalParamType;
// Index of the call argument within the call.
unsigned ArgIdx;
// Type of the call argument before any adjustment.
QualType OriginalArgType;
};
/// \brief Finish template argument deduction for a function template,
/// checking the deduced arguments and producing the resulting
/// specialization in \p Specialization.
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false);
/// \brief Deduce template arguments for a function template from the
/// arguments of a call.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool PartialOverloading = false);
/// \brief Deduce template arguments for a function template against a
/// target function type (e.g., when taking the address of a function).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Deduce template arguments for a conversion function template
/// against the type being converted to.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// \brief Deduce template arguments for a function template when there
/// is nothing to deduce against.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
/// \brief Deduce the type for a declaration with 'auto' type from its
/// initializer.
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
/// \brief Deduce the type for a declaration with 'auto' type from its
/// initializer.
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
/// \brief Diagnose a failure to deduce the type of a variable declared
/// with 'auto'.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// \brief Deduce the (deferred) return type of the function \p FD,
/// optionally diagnosing failure.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
/// \brief Deduce the function type of \p FD from a return statement's
/// expression when its return type contains 'auto' (\p AT).
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// \brief Determine which of two function templates is more specialized
/// under the partial ordering rules, or return null if neither is.
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
/// \brief Select the most specialized candidate in [SBegin, SEnd),
/// emitting the given diagnostics when none or more than one qualifies.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
/// \brief Determine which of two class template partial specializations
/// is more specialized.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
/// \brief Determine which of two variable template partial
/// specializations is more specialized.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
/// \brief Mark which template parameters are used (or, when
/// \p OnlyDeduced, deducible) within the given template argument list.
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// \brief Mark which template parameters of \p FunctionTemplate can be
/// deduced from its function parameters, using this Sema's ASTContext.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
// Delegate to the static, context-taking overload below.
MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
/// \brief Static variant of the above that operates within an explicit
/// ASTContext.
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
/// \brief Collect the complete, multi-level set of template arguments in
/// effect for an instantiation of \p D.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
/// \brief Equality compares only Kind, Entity, and the kind-specific
/// fields; source locations and ranges deliberately do not participate.
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instanting a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
/// \brief RAII helper that temporarily overrides the Sema's current
/// argument-pack substitution index and restores the previous value on
/// scope exit.
///
/// See \c ArgumentPackSubstitutionIndex for the meaning of the index.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;     // Sema whose substitution index is being overridden.
  int SavedIndex; // Value to restore when this object is destroyed.

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), SavedIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = SavedIndex;
  }
};
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of calls expression undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
// Tag type used solely to select the exception-specification
// constructor overload below.
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are performing template argument deduction or a
/// related substitution for a function template, as indicated by \p Kind.
///
/// NOTE(review): the original comment here duplicated the "default
/// argument in a template-id" text of the overload above, which does not
/// match the \c InstantiationKind / \c TemplateDeductionInfo parameters;
/// confirm the exact wording against the implementation.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating for a function parameter
/// (NOTE(review): presumably a default function argument, inferred from
/// the \c ParmVarDecl entity — confirm against the implementation).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef; // Sema whose instantiation stack this object pushes/pops.
bool Invalid; // True if construction hit the recursion-depth limit.
bool SavedInNonInstantiationSFINAEContext; // State restored by Clear().
// Reports whether adding another entry would exceed the maximum
// instantiation depth (emitting a diagnostic if so).
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
// Common implementation shared by the public constructors above.
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
// Not copyable: each object owns exactly one stack entry.
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
// Requires at least one active expression evaluation context; the
// innermost (most recently pushed) context decides the answer.
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
///
/// On construction the trap records the current SFINAE error count and
/// related flags; on destruction it restores them, so errors raised in
/// between are observable only via hasErrorOccurred().
class SFINAETrap {
  Sema &SemaRef;
  unsigned SavedSFINAEErrors;                // NumSFINAEErrors at entry.
  bool SavedInNonInstantiationSFINAEContext; // Flag value at entry.
  bool SavedAccessCheckingSFINAE;            // Flag value at entry.

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), SavedSFINAEErrors(SemaRef.NumSFINAEErrors),
        SavedInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        SavedAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) {
    // When no template instantiation is active, record that this is a
    // non-instantiation SFINAE context.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = SavedSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        SavedInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = SavedAccessCheckingSFINAE;
  }

  /// \brief Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > SavedSFINAEErrors;
  }
};
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;                 // Suppresses diagnostics for the scope.
  bool SavedDisableTypoCorrection; // Previous typo-correction setting.

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        SavedDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = SavedDisableTypoCorrection;
  }
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class SavePendingInstantiationsAndVTableUsesRAII {
public:
SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
~SavePendingInstantiationsAndVTableUsesRAII() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief An attribute from a template pattern whose instantiation is
/// deferred; collected by InstantiateAttrs (see below) for later
/// processing.
struct LateInstantiatedAttribute {
const Attr *TmplAttr; // The attribute on the template pattern.
LocalInstantiationScope *Scope; // Scope in which to instantiate it.
Decl *NewDecl; // The instantiated declaration receiving the attribute.
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// \brief The kind of Objective-C container construct, if any, currently
/// being processed (see getObjCContainerKind below).
enum ObjCContainerKind {
OCK_None = -1, // Not inside any Objective-C container.
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = nullptr,
ObjCContainerDecl *lexicalDC = nullptr);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
/// \brief Kind of Objective-C method with special semantics.
/// NOTE(review): the enumerator names suggest the Cocoa
/// alloc/new/copy/init method families — confirm against usage.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// \brief Parser-supplied information about one keyword argument of an
/// Objective-C method declaration (consumed by ActOnMethodDeclaration).
struct ObjCArgInfo {
IdentifierInfo *Name; // The argument's name.
SourceLocation NameLoc; // Location of the argument's name.
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
///
/// Computed by \c getObjCMessageKind() below.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
/// Used when checking Objective-C method overrides (see
/// CheckObjCMethodOverrides below).
enum ResultTypeCompatibilityKind {
RTC_Compatible, // The result type is compatible with the method.
RTC_Incompatible, // The result type conflicts with the method.
RTC_Unknown // Compatibility could not be determined.
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// \brief Alignment modes accepted by '\#pragma options align'
/// (see ActOnPragmaOptionsAlign below).
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// \brief The flavors of '\#pragma pack' (see ActOnPragmaPack below).
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
/// \brief On/off state for '\#pragma ms_struct'
/// (see ActOnPragmaMSStruct below).
enum PragmaMSStructKind {
PMSST_OFF, // #pragma ms_struct off
PMSST_ON // #pragma ms_struct on
};
/// \brief The kinds of '\#pragma comment(...)' directives
/// (see ActOnPragmaMSComment below).
enum PragmaMSCommentKind {
PCK_Unknown,
PCK_Linker, // #pragma comment(linker, ...)
PCK_Lib, // #pragma comment(lib, ...)
PCK_Compiler, // #pragma comment(compiler, ...)
PCK_ExeStr, // #pragma comment(exestr, ...)
PCK_User // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
/// \brief Sections controlled by the MSVC section pragmas
/// (bss_seg/data_seg/const_seg/code_seg).
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
/// \brief Overload used when a declaration names a section.
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
/// \brief Overload used when a section pragma names a section.
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
/// \brief Clone declaration \p ND for use by '\#pragma weak' under name \p II.
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
/// \brief Apply recorded '\#pragma weak' info \p W to declaration \p ND.
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
/// \brief Overload of AddAlignedAttr taking a TypeSourceInfo rather than an
/// alignment expression.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
// OpenMP directives and clauses.
private:
// Opaque stack of OpenMP data-sharing attributes (DSA); managed by the
// Init/Destroy functions below.
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
/// \brief Destruction of data-sharing attributes stack.
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
OpenMPClauseKind CKind);
public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);
/// \brief Check if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// \brief Generic entry point called for an executable OpenMP directive of
/// kind \p Kind after parsing of the associated statement; dispatches to the
/// directive-specific ActOnOpenMP*Directive members below.
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Generic entry point for clauses of kind \p Kind carrying a single
/// expression; dispatches to the clause-specific members below.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Generic entry point for clauses of kind \p Kind carrying a single
/// simple (enumeration) argument.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Generic entry point for clauses of kind \p Kind carrying both a
/// simple argument and an expression (e.g. 'schedule').
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
unsigned Argument, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation KindLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Generic entry point for argument-less clauses of kind \p Kind.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Generic entry point for clauses of kind \p Kind carrying a list of
/// variables; dispatches to the clause-specific members below.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *
ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'linear' clause.
OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK (an rvalue by default).
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
/// \brief Check an increment/decrement of a pseudo-object expression.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
/// \brief Check an assignment whose LHS is a pseudo-object expression.
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
/// \brief Check a pseudo-object expression used as an rvalue.
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
/// \brief Type check the operands of a conditional operator.
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
/// \brief C++ variant of conditional-operand checking.
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
/// \brief Find the composite pointer type of \p E1 and \p E2; may rewrite
/// the expressions in place.
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
/// Convenience overload of FindCompositePointerType that operates on
/// ExprResults: unwraps the expressions, delegates to the Expr*& overload,
/// and writes any rewritten expressions back into the results.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool *NonStandardCompositeType = nullptr) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Result =
      FindCompositePointerType(Loc, LHS, RHS, NonStandardCompositeType);
  E1 = LHS;
  E2 = RHS;
  return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
/// Enumerators are ordered from least to most compatible.
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// Result of checking an ObjC ARC conversion: either the conversion is fine
/// as-is (ACR_okay), or the operand still carries an unbridged cast that a
/// later step must strip or diagnose (ACR_unbridged; see
/// stripARCUnbridgedCast / diagnoseARCUnbridgedCast below).
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// When true, diagnostics are suppressed entirely.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
// Invoked when the expression is not an integer constant expression at all.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
// Invoked when the expression is not a formal ICE but could be folded to a
// constant; has a default implementation (declared, not pure).
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
// Classification of a function for CUDA compilation, as computed by
// IdentifyCUDATarget (presumably from the function's CUDA target
// attributes — device/global/host — confirm at the definition site).
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
/// Passed to CodeCompleteOrdinaryName to describe where in the grammar
/// completion was triggered.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change ends
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
// Index of the format-string argument.
unsigned FormatIdx;
// Index of the first data (variadic) argument.
unsigned FirstDataArg;
// True when the callee takes a va_list rather than variadic arguments.
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
ArrayRef<const Expr *> Args, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);
public:
// The flavor of format string named by a FormatAttr (see
// GetFormatStringType); determines which grammar the format-string
// checker (CheckFormatString) applies.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs);
bool FormatStringHasSArg(const StringLiteral *FExpr);
bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// Type information associated with a registered type-tag magic value
/// (see RegisterTypeTagForDatatype).
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
/// The type this magic value is tied to.
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// NOTE(review): presumably means the tagged argument must be null —
// inferred from the name; confirm at the check site.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
/// Forward to the current scope's MS mangling-number counter.
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
/// Returns OriginalLexicalContext when one is set, otherwise CurContext.
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
/// Like getCurLexicalContext(), but maps an Objective-C category to the
/// interface it extends, since a category implicitly has the attributes
/// of its interface.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *Ctx = getCurLexicalContext();
  const ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx);
  return Category ? Category->getClassInterface() : Ctx;
}
/// \brief To be used for checking whether the arguments being passed to
/// a function exceed the number of parameters it expects.
///
/// \param PartialOverloading When true (code completion just past a comma),
/// the argument being typed counts as one additional argument.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  const size_t EffectiveArgs =
      (PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
  return EffectiveArgs > NumParams;
}
// HLSL Change Begin - adjust this from T* to T&-like
CXXThisExpr *genereateHLSLThis(SourceLocation Loc, QualType ThisType,
bool isImplicit);
// HLSL Change End - adjust this from T* to T&-like
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
// Enter \p NewContext, optionally recording a lambda context declaration.
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
// Tag-dispatched overload (Sema::ReuseLambdaContextDecl_t) that re-uses
// the enclosing context's lambda context declaration.
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
// Pops the evaluation context on scope exit.
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// \brief The saved token stream to be re-parsed later.
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
cpu_ctc.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#include <dmlc/omp.h>
#include "ctc_helper.h"
namespace mxnet_warpctc {
// CPU implementation of the CTC (Connectionist Temporal Classification)
// loss. All scratch memory is carved out of a caller-provided workspace;
// this class performs no allocation of its own.
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
// The workspace pointer is borrowed, not owned.
CpuCTC(int alphabet_size, int minibatch, void* workspace,
int blank_label) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
workspace_(workspace), blank_label_(blank_label) {
};
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
// Compute per-sequence CTC costs and the gradient w.r.t. the activations.
ctcStatus_t cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
// Forward-only scoring: fills 'costs' without computing gradients.
ctcStatus_t score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
// Per-minibatch-entry scratch buffers, all pointing into the workspace
// (see the constructor definition for the exact layout).
class CpuCTC_metadata {
private:
int setup_labels(const int* const labels, int blank_label, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used, int blank_label,
const int* const labels);
ProbT* alphas;        // forward log-probabilities, S * T entries
ProbT* betas;         // backward log-probabilities, one column of S entries
int* labels_w_blanks; // labels interleaved with blanks, length S
int* e_inc;           // per-step increments for the end of the valid range
int* s_inc;           // per-step increments for the start of the valid range
ProbT* output;        // per-character accumulator, length alphabet_size
int repeats;          // count of adjacent repeated labels
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
void* workspace_;
int blank_label_;
// Column-wise log-softmax over activations into log_probs.
void log_softmax(const ProbT* const activations, ProbT* log_probs,
const int* const input_lengths);
// Loss and gradient for one minibatch entry; returns (cost, over_threshold).
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const log_probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
// Forward (alpha) recursion; returns the forward log-likelihood.
ProbT compute_alphas(const ProbT* log_probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
// Backward (beta) recursion; accumulates the gradient into 'grad' and
// returns the backward log-likelihood.
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
// Carve this entry's scratch buffers out of the shared workspace, starting
// at byte offset 'bytes_used' (a local copy; the caller tracks its own
// offsets). Layout, in order: alphas (S*T ProbT), betas (S ProbT),
// labels_w_blanks (S int), e_inc (S int), s_inc (S int),
// output (alphabet_size ProbT). Finally builds the label tables.
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
int alphabet_size,
void* workspace, size_t bytes_used,
int blank_label,
const int* const labels) {
alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S * T;
// Alphas/betas live in log space, so "zero probability" is -infinity.
std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S;
std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * alphabet_size;
// Fill labels_w_blanks / s_inc / e_inc and count repeated labels.
repeats = setup_labels(labels, blank_label, L, S);
}
// Build the blank-interleaved label sequence and the per-step increment
// tables used to advance the start/end of the valid alpha range.
// Returns the number of adjacent repeated labels; a repeat forces the
// recursion through the separating blank (increment 1), while distinct
// neighbors allow the blank to be skipped (increment 2).
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
                                                 int blank_label, int L, int S) {
  int repeats = 0;
  int s_count = 0;
  int e_count = 0;

  // The start index always moves by one on its first transition.
  s_inc[s_count++] = 1;
  for (int i = 1; i < L; ++i) {
    if (labels[i - 1] == labels[i]) {
      // Repeated label: two single steps through the blank in between.
      s_inc[s_count++] = 1;
      s_inc[s_count++] = 1;
      e_inc[e_count++] = 1;
      e_inc[e_count++] = 1;
      ++repeats;
    } else {
      // Distinct labels: the blank may be skipped, so advance by two.
      s_inc[s_count++] = 2;
      e_inc[e_count++] = 2;
    }
  }
  e_inc[e_count++] = 1;

  // Interleave a blank before each label and terminate with a blank:
  // [blank, l0, blank, l1, ..., blank, l(L-1), blank], length S = 2L + 1.
  for (int i = 0; i < L; ++i) {
    labels_w_blanks[2 * i] = blank_label;
    labels_w_blanks[2 * i + 1] = labels[i];
  }
  labels_w_blanks[S - 1] = blank_label;

  return repeats;
}
// Convert raw activations into log-probabilities, one alphabet-sized
// column at a time:
//   log_probs[r] = a[r] - max(a) - log(sum_r exp(a[r] - max(a)))
// (max subtracted for numerical stability). Columns are laid out
// time-major across the minibatch; only the first input_lengths[mb]
// timesteps of each sequence are processed.
template<typename ProbT>
void
CpuCTC<ProbT>::log_softmax(const ProbT* const activations, ProbT* log_probs,
                           const int* const input_lengths) {
#pragma omp parallel for
  for (int mb = 0; mb < minibatch_; ++mb) {
    for (int t = 0; t < input_lengths[mb]; ++t) {
      const int offset = (mb + minibatch_ * t) * alphabet_size_;
      const ProbT* const col = activations + offset;

      const ProbT max_act = *std::max_element(col, col + alphabet_size_);

      ProbT sum_exp = ProbT(0.);
      for (int r = 0; r < alphabet_size_; ++r)
        sum_exp += std::exp(col[r] - max_act);

      const ProbT log_sum = std::log(sum_exp);
      for (int r = 0; r < alphabet_size_; ++r)
        log_probs[r + offset] = col[r] - max_act - log_sum;
    }
  }
}
// Computes the CTC loss and gradient for one utterance. Returns
// (-log-likelihood, over_threshold) where over_threshold flags a forward /
// backward log-likelihood mismatch beyond ctc_helper::threshold (a numeric
// sanity check). bytes_used is this utterance's byte offset into the shared
// workspace.
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const log_probs,
const int* const labels,
int T, int L, int mb, size_t bytes_used) {
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);
bool over_threshold = false;
// Infeasible: the label sequence (including forced blanks between repeats)
// cannot fit in T frames, so the probability of the labelling is zero.
if (L + ctcm.repeats > T) {
return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
}
ProbT llForward = compute_alphas(log_probs, ctcm.repeats, S, T, ctcm.e_inc,
ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
ProbT llBackward = compute_betas_and_grad(grad, log_probs, llForward, ctcm.repeats,
S, T, ctcm.e_inc, ctcm.s_inc,
ctcm.labels_w_blanks,
ctcm.alphas,
ctcm.betas,
ctcm.output);
// Forward and backward passes should agree on the total log-likelihood.
ProbT diff = std::abs(llForward - llBackward);
if (diff > ctc_helper::threshold) {
over_threshold = true;
}
return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities
// Standard CTC forward pass in log space over the (S rows x T columns)
// lattice, tracking only the feasible [start, end) row window per column.
// Returns the total forward log-likelihood.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* log_probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
// Paths may begin at the leading blank (row 0) or the first label (row 1);
// row 0 is excluded when every remaining frame is needed to emit labels.
int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
for (int i = start; i < end; ++i) {
alphas[i] = log_probs[labels[i]];
}
for(int t = 1; t < T; ++t) {
// remain >= 0 means the window's lower edge must advance to stay feasible.
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
// idx1/idx2: this and the previous alpha column; idx3: this log-prob
// column (minibatch-interleaved layout, stride alphabet_size_*minibatch_).
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
// Row 0 (leading blank) can only be reached from itself.
if (start == 0) {
alphas[idx1] = alphas[idx2] + log_probs[blank_label_ + idx3];
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + log_probs[labels[i] + idx3];
}
}
// Total likelihood: log-sum over the rows still reachable at the last frame.
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
// betas holds only ONE column (length S) and is overwritten in place each
// time step; alphas is destructively updated to hold log(alpha*beta).
// Returns the backward log-likelihood, which should match the forward one.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = log_probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)];
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient wrt to each unique label
// grad = softmax(activation) - sum(alpha*beta for this label) / (p(l|x) * prob)
// NOTE(review): the `output[i] == 0.0` guard treats exact 0.0 as an
// "untouched" sentinel, but 0.0 is also a legal log value (probability 1);
// presumably harmless in practice -- confirm.
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
log_probs[idx3] == ctc_helper::neg_inf<ProbT>()) {
grad[idx3] = std::exp(log_probs[idx3]);
} else {
grad[idx3] = std::exp(log_probs[idx3])
- std::exp(output[i] - log_probs[idx3] - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
// Shrink the feasible [start, end) window symmetrically to the forward pass.
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
// Reads betas[i+1] / betas[i+2] (previous column values not yet
// overwritten) before writing betas[i]: iteration order matters here.
for(int i = start; i < endloop; ++i) {
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
betas[i] = next_sum + log_probs[labels[i] + idx3];
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
// The last row (trailing blank) can only transition to itself.
if (end == S) {
betas[(S-1)] = betas[(S-1)] + log_probs[blank_label_ + idx3];
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
// wrt to each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
log_probs[idx3] == ctc_helper::neg_inf<ProbT>()) {
grad[idx3] = std::exp(log_probs[idx3]);
} else {
grad[idx3] = std::exp(log_probs[idx3])
- std::exp(output[i] - log_probs[idx3] - log_partition);
}
++idx3;
}
}
// Backward log-likelihood: log-sum of the beta column at t = 0.
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
// Computes CTC losses (costs[mb]) and gradients (grads) for a minibatch.
// activations/grads/log_probs are time-major and minibatch-interleaved:
// column (mb, t) starts at (mb + minibatch_ * t) * alphabet_size_.
// Returns CTC_STATUS_INVALID_VALUE when any required pointer is null.
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                             ProbT *grads,
                             ProbT *costs,
                             const int* const flat_labels,
                             const int* const label_lengths,
                             const int* const input_lengths) {
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;
    // The front of the workspace holds the log-softmax of the activations.
    ProbT* log_probs = static_cast<ProbT *>(workspace_);
    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
    // Per-minibatch scratch regions follow the log-probs block.
    // NOTE: sized with sizeof(ProbT) to match CpuCTC_metadata's own offset
    // arithmetic. The previous sizeof(float) under-sized these regions when
    // ProbT is double, making adjacent minibatch buffers overlap; keep the
    // external workspace-size computation in sync with this.
    size_t per_minibatch_bytes = 0;
    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;
    // output accumulator
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;
    // alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;
    // betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;
    // labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;
    log_softmax(activations, log_probs, input_lengths);
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        bool mb_status;
        std::tie(costs[mb], mb_status) =
            cost_and_grad_kernel(grads + mb * alphabet_size_,
                                 log_probs + mb * alphabet_size_,
                                 // flat_labels is a concatenation; skip the
                                 // labels of all preceding utterances.
                                 flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
                                 T, L, mb,
                                 bytes_used + mb * per_minibatch_bytes);
    }
    return CTC_STATUS_SUCCESS;
}
// Computes only the CTC losses (no gradients) via the forward pass.
// Layout and workspace carving match cost_and_grad.
// Returns CTC_STATUS_INVALID_VALUE when any required pointer is null.
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
                                         ProbT* costs,
                                         const int* const flat_labels,
                                         const int* const label_lengths,
                                         const int* const input_lengths) {
    if (activations == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;
    // The front of the workspace holds the log-softmax of the activations.
    ProbT* log_probs = static_cast<ProbT *>(workspace_);
    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
    // Per-minibatch scratch regions follow the log-probs block.
    // NOTE: sized with sizeof(ProbT), matching CpuCTC_metadata's offset
    // arithmetic; the previous sizeof(float) under-sized the regions when
    // ProbT is double (overlapping buffers between minibatch elements).
    size_t per_minibatch_bytes = 0;
    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;
    // output accumulator
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;
    // alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;
    // betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;
    // labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;
    log_softmax(activations, log_probs, input_lengths);
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        const int S = 2*L + 1;           // Number of labels with blanks
        CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
                             bytes_used + mb * per_minibatch_bytes, blank_label_,
                             flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
        if (L + ctcm.repeats > T)
            // Infeasible labelling; see the TODO in cost_and_grad_kernel.
            costs[mb] = ProbT(0);
        else {
            costs[mb] = -compute_alphas(log_probs + mb * alphabet_size_, ctcm.repeats, S, T,
                                        ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
                                        ctcm.alphas);
        }
    }
    return CTC_STATUS_SUCCESS;
}
} // mxnet_warpctc
|
tagger.c | #include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <strings.h>
#include "../dictionary/dictionary_generator.h"
#include "../lib/hashmap.h"
#include "../corpus/corpus_io.h"
#include "../rules/rules.h"
#include "tags.h"
#include "tagger.h"
/* Assigns an initial machine tag to every word in the corpus, using the
 * per-word tag-frequency hashmap. Lines are independent, so they are
 * processed in parallel across 4 threads. */
void apply_initial_tags(corpus_t corpus, hashmap_t map){
#pragma omp parallel for num_threads(4)
for(size_t i = 0; i < corpus.num_lines; i++){
apply_initial_tag(corpus.words[i], map, i, corpus);
}
}
/* applies initial tag based on tag frequency for a word
* args:
* char *:
* pointer to start of a line in mem map
* map_t:
* the hashmap with tag frequencies for each word
*/
void apply_initial_tag(char *word, hashmap_t hash_map, size_t index, corpus_t corpus){
/* Most-frequent tag for this word, if the word is known. */
int* hashed_value = (int *)hashmap_get(&hash_map, word);
if(ignore_tag(corpus.human_tags[index])){
/* Punctuation / ignorable token: use the heuristic tagger instead. */
apply_initial_unknown_word_tag(word, index, corpus);
}
else if(hashed_value){
corpus.machine_tags[index] = *hashed_value;
}
/* NOTE(review): a word that is neither ignored nor present in the map
 * leaves machine_tags[index] unchanged here -- confirm whether the
 * unknown-word heuristics were meant to run in that case too. */
}
/* called if the word cannot be found in the hashmap (unknown).
   Runs basic checks for a small number of common tags before giving up:
   ignorable punctuation, numbers, then capitalized proper nouns; anything
   left over is tagged FU ("unclassified").
   Changes from the previous version: removed the unused num_type local and
   replaced the final `else if(tag == 0)` with a plain else -- that branch
   was only reachable when proper_noun_type() had just returned 0, so the
   extra test was dead (and read like a possible uninitialized access). */
void apply_initial_unknown_word_tag(char *word, size_t index, corpus_t corpus){
    int tag;
    if(corpus.info[index].ignore_flag){
        if(corpus.human_tags[index] == NUL)
            corpus.machine_tags[index] = NUL;
        else if((tag = get_ignored_tag(word)))
            corpus.machine_tags[index] = tag;
        else{
            /* Ignored token with no recognized punctuation tag: fall back
               to NUL rather than aborting (a fatal error lived here once). */
            corpus.machine_tags[index] = NUL;
        }
    }
    else if((tag = number_type(word)) != 0)
        corpus.machine_tags[index] = tag;   /* cardinal number */
    else if((tag = proper_noun_type(word, corpus, index)) != 0)
        corpus.machine_tags[index] = tag;   /* capitalized -> proper noun */
    else
        corpus.machine_tags[index] = FU;    /* "unclassified" */
}
/* Guesses a proper-noun tag for an unknown capitalized word. Returns NN2
 * when the word ends in 's' (treated as plural), NN1 otherwise, or 0 when
 * the word does not look like a proper noun.
 * Fix: word[0] is cast to unsigned char before isupper() -- passing a
 * negative char (bytes >= 0x80 where char is signed) to the <ctype.h>
 * classifiers is undefined behavior.
 * NOTE(review): the sentence-boundary test reads machine_tags[index] (the
 * current word's slot) rather than the previous word's tag; confirm
 * whether index - 1 was intended. */
int proper_noun_type(char * word, corpus_t corpus, size_t index){
    /* word is probably proper noun if previous tag is not end of sentence and it is cap */
    if (isupper((unsigned char)word[0]) &&
        corpus.machine_tags[index] != PER &&
        corpus.machine_tags[index] != QUE &&
        corpus.machine_tags[index] != DQ &&
        corpus.machine_tags[index] != EXC){
        if(word[word_length(word)-1] == 's')
            return NN2;
        else
            return NN1;
    }
    else
        return 0;
}
/* Classifies a word as a cardinal number. Returns MCMC for hyphenated
 * numbers (e.g. "1914-1918"), MC1 for the single digit "1", MC for any
 * other cardinal, and 0 when the word is not numeric. Digits plus the
 * separators '.' and ',' and the hyphen '-' are accepted; any other
 * character disqualifies the word.
 * Fixes three defects in the previous version: the terminating '\0' was
 * classified like an ordinary character, so the "not a number" branch
 * fired on it and the function returned 0 for EVERY word; `tag` could be
 * returned uninitialized; and the digit/hyphen flags reflected only the
 * last character examined rather than the whole word. */
int number_type(char * word){
    bool has_hyphen = false;
    bool has_digit = false;
    size_t len = 0;
    for(; word[len] != '\0'; len++){
        unsigned char c = (unsigned char)word[len];
        if(c == '-')
            has_hyphen = true;
        else if(isdigit(c) || c == '.' || c == ',')
            has_digit = true;
        else
            return 0; /* no cardinal tag found */
    }
    if(!has_digit)
        return 0; /* empty or separator/hyphen-only words are not numbers */
    if(has_hyphen)
        return MCMC;
    if(len == 1 && word[0] == '1')
        return MC1;
    return MC;
}
/* Maps the leading character of an ignorable token (punctuation) to its
 * tag. Only word[0] is examined; returns 0 for any character that is not
 * a recognized punctuation mark. */
int get_ignored_tag(char *word){
    const char c = word[0];
    if(c == ':') return COL;
    if(c == ';') return SCOL;
    if(c == '(') return LPAR;
    if(c == ')') return RPAR;
    if(c == '.') return PER;
    if(c == ',') return COM;
    if(c == '?') return QUE;
    if(c == '!') return EXC;
    if(c == '"') return DQ;
    return 0;
}
/* Applies every contextual rule in the list to the whole corpus, in order. */
void apply_rules_to_corpus(rules_list_t rules, corpus_t corpus){
    int rule_idx;
    for(rule_idx = 0; rule_idx < rules.length; rule_idx++){
        apply_rule_to_corpus(rules.rules[rule_idx], corpus);
    }
}
/* NOTE(review): this body is identical to apply_rules_to_corpus and,
 * despite the name, takes no index list -- confirm whether a variant that
 * restricts rule application to specific indices was intended here. */
void apply_rules_to_indices(rules_list_t rules, corpus_t corpus){
for(int i = 0; i < rules.length; i++)
apply_rule_to_corpus(rules.rules[i], corpus);
}
/* applies rules from a text file (not part of machine learning) */
/* Two passes: collect every matching line first, then rewrite its tag, so
 * that rewriting cannot influence the contextual checks of later lines.
 * The index list is heap-allocated: the previous variable-length array
 * (`int64_t indices[corpus.num_lines]`) risked stack overflow on large
 * corpora. Also uses size_t indices throughout instead of comparing a
 * signed int against the size_t num_lines. */
void apply_rule_to_corpus(contextual_rule_t rule, corpus_t corpus){
    if(corpus.num_lines == 0)
        return;
    int64_t *indices = malloc(corpus.num_lines * sizeof *indices);
    if(indices == NULL){
        fprintf(stderr, "Error: out of memory while applying contextual rule\n");
        exit(EXIT_FAILURE);
    }
    size_t count = 0;
    for(size_t i = 0; i < corpus.num_lines; i++)
        if(check_contextual_rule(rule, i, corpus))
            indices[count++] = (int64_t)i;
    for(size_t i = 0; i < count; i++)
        corpus.machine_tags[indices[i]] = rule.tag2;
    free(indices);
}
/* checks if a contextual (known word)
rule applies, given contextual information */
/* True when the line currently carries rule.tag1 AND the rule's trigger
 * predicate (looked up by index in the contextual_rules function table)
 * accepts the surrounding context with the rule's two arguments. */
bool check_contextual_rule(contextual_rule_t rule, size_t index, corpus_t corpus){
return corpus.machine_tags[index] == rule.tag1 && contextual_rules[rule.triggerfn](corpus, index, rule.arg1, rule.arg2);
}
|
omp_parallel_if.c | <ompts:test>
<ompts:testdescription>Test which checks the if option of the parallel construct.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp parallel if</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_if</ompts:testcode:functionname> (FILE * logFile)
{
<ompts:orphan:vars>
int i;
int sum;
int known_sum;
int mysum;
int control=1;
</ompts:orphan:vars>
sum =0;
/* Expected value of 1 + 2 + ... + LOOPCOUNT. */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 ;
/* control is always 1, so a conforming if(control==0) clause must make the
   parallel region execute with a team of one thread (serialized). */
#pragma omp parallel private(i) <ompts:check>if(control==0)</ompts:check>
{
<ompts:orphan>
/* NOTE(review): mysum is shared (only i is private). When the region is
   serialized the sum comes out exact; with multiple threads the data race
   on mysum corrupts the result -- presumably this is how the cross-check
   (directive removed) detects an ignored if clause. Confirm against the
   testsuite's conventions. */
mysum = 0;
for (i = 1; i <= LOOPCOUNT; i++)
{
mysum = mysum + i;
}
#pragma omp critical
{
sum = sum + mysum;
} /* end of critical */
</ompts:orphan>
} /* end of parallel */
return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
|
distance.h | #pragma once
#include <utils.h>
#ifdef _WINDOWS
#include <immintrin.h>
#include <smmintrin.h>
#include <tmmintrin.h>
#include <intrin.h>
#else
#include <immintrin.h>
#endif
#include <cosine_similarity.h>
#include <iostream>
namespace {
// Internal SIMD helpers: widen int8 lanes to int16, multiply, and
// pairwise-accumulate into int32 with _mm_madd_epi16, returning floats.
// Sign extension is done manually by unpacking with a computed sign mask
// (_mm_cmplt_epi8 yields 0xFF for negative lanes).
// Squares the HIGH 8 int8 lanes of X and returns the 4 pairwise int32 sums
// as floats. (The _mm_add_epi32 with zero is a no-op kept from the
// original formulation.)
static inline __m128 _mm_mulhi_epi8(__m128i X) {
__m128i zero = _mm_setzero_si128();
__m128i sign_x = _mm_cmplt_epi8(X, zero);
__m128i xhi = _mm_unpackhi_epi8(X, sign_x);
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_setzero_si128(), _mm_madd_epi16(xhi, xhi)));
}
// Same as _mm_mulhi_epi8 but shifts each 64-bit lane right by 32 bits
// first, so a different 4-byte group lands in the high unpack positions.
static inline __m128 _mm_mulhi_epi8_shift32(__m128i X) {
__m128i zero = _mm_setzero_si128();
X = _mm_srli_epi64(X, 32);
__m128i sign_x = _mm_cmplt_epi8(X, zero);
__m128i xhi = _mm_unpackhi_epi8(X, sign_x);
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_setzero_si128(), _mm_madd_epi16(xhi, xhi)));
}
// Elementwise int8 product of all 16 lanes of X and Y, pairwise-summed to
// 4 int32 values and returned as floats.
static inline __m128 _mm_mul_epi8(__m128i X, __m128i Y) {
__m128i zero = _mm_setzero_si128();
__m128i sign_x = _mm_cmplt_epi8(X, zero);
__m128i sign_y = _mm_cmplt_epi8(Y, zero);
__m128i xlo = _mm_unpacklo_epi8(X, sign_x);
__m128i xhi = _mm_unpackhi_epi8(X, sign_x);
__m128i ylo = _mm_unpacklo_epi8(Y, sign_y);
__m128i yhi = _mm_unpackhi_epi8(Y, sign_y);
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_madd_epi16(xlo, ylo), _mm_madd_epi16(xhi, yhi)));
}
// Squares all 16 int8 lanes of X, pairwise-summed to 4 int32 values and
// returned as floats.
static inline __m128 _mm_mul_epi8(__m128i X) {
__m128i zero = _mm_setzero_si128();
__m128i sign_x = _mm_cmplt_epi8(X, zero);
__m128i xlo = _mm_unpacklo_epi8(X, sign_x);
__m128i xhi = _mm_unpackhi_epi8(X, sign_x);
return _mm_cvtepi32_ps(
_mm_add_epi32(_mm_madd_epi16(xlo, xlo), _mm_madd_epi16(xhi, xhi)));
}
// Multiplies the LOW 8 int8 lanes of X and Y (sign-extended via
// _mm_cvtepi8_epi16), keeping only the low 2 pairwise sums; the upper two
// result lanes are zeroed.
static inline __m128 _mm_mul32_pi8(__m128i X, __m128i Y) {
__m128i xlo = _mm_cvtepi8_epi16(X), ylo = _mm_cvtepi8_epi16(Y);
return _mm_cvtepi32_ps(
_mm_unpacklo_epi32(_mm_madd_epi16(xlo, ylo), _mm_setzero_si128()));
}
// 256-bit variant: elementwise int8 product of all 32 lanes, pairwise
// summed to 8 int32 values as floats. Note unpack interleaves within each
// 128-bit half, so the output lane order is permuted -- callers only sum.
static inline __m256 _mm256_mul_epi8(__m256i X, __m256i Y) {
__m256i zero = _mm256_setzero_si256();
__m256i sign_x = _mm256_cmpgt_epi8(zero, X);
__m256i sign_y = _mm256_cmpgt_epi8(zero, Y);
__m256i xlo = _mm256_unpacklo_epi8(X, sign_x);
__m256i xhi = _mm256_unpackhi_epi8(X, sign_x);
__m256i ylo = _mm256_unpacklo_epi8(Y, sign_y);
__m256i yhi = _mm256_unpackhi_epi8(Y, sign_y);
return _mm256_cvtepi32_ps(_mm256_add_epi32(_mm256_madd_epi16(xlo, ylo),
_mm256_madd_epi16(xhi, yhi)));
}
// Multiplies 16 int8 lanes (widened to int16); blend mask 252 (0b11111100)
// keeps only the low 2 float lanes of the result, zeroing the rest -- i.e.
// effectively the products of the first 4 byte pairs.
static inline __m256 _mm256_mul32_pi8(__m128i X, __m128i Y) {
__m256i xlo = _mm256_cvtepi8_epi16(X), ylo = _mm256_cvtepi8_epi16(Y);
return _mm256_blend_ps(_mm256_cvtepi32_ps(_mm256_madd_epi16(xlo, ylo)),
_mm256_setzero_ps(), 252);
}
// Horizontal sum of the 8 float lanes of x.
static inline float _mm256_reduce_add_ps(__m256 x) {
/* ( x3+x7, x2+x6, x1+x5, x0+x4 ) */
const __m128 x128 =
_mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x));
/* ( -, -, x1+x3+x5+x7, x0+x2+x4+x6 ) */
const __m128 x64 = _mm_add_ps(x128, _mm_movehl_ps(x128, x128));
/* ( -, -, -, x0+x1+x2+x3+x4+x5+x6+x7 ) */
const __m128 x32 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
/* Conversion to float is a no-op on x86-64 */
return _mm_cvtss_f32(x32);
}
} // namespace
namespace diskann {
// enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };
// Abstract interface for distance kernels: compare() returns a score for
// two length-'length' vectors; smaller is treated as closer by callers
// (similarity kernels return negated values to fit this convention).
template<typename T>
class Distance {
public:
virtual float compare(const T *a, const T *b, unsigned length) const = 0;
virtual ~Distance() {
}
};
// Cosine-similarity distance; delegates to compute_cosine_similarity.
// compare() is implicitly virtual (overrides Distance<T>::compare) and is
// private (class default), so it is only reachable through the base class.
template<typename T>
class DistanceCosine : public Distance<T> {
float compare(const T *a, const T *b, unsigned length) const {
return diskann::compute_cosine_similarity<T>(a, b, length);
}
};
// Squared-L2 distance for int8 vectors. On Windows with AVX2 a vectorized
// path is used; elsewhere a scalar omp-simd loop. Note _mm256_subs_epi8 is
// a SATURATING subtract: differences clamp to [-128, 127] before squaring.
class DistanceL2Int8 : public Distance<int8_t> {
public:
float compare(const int8_t *a, const int8_t *b, unsigned size) const {
int32_t result = 0;
#ifdef _WINDOWS
#ifdef USE_AVX2
__m256 r = _mm256_setzero_ps();
char * pX = (char *) a, *pY = (char *) b;
// Main loop: 32 elements per iteration.
while (size >= 32) {
__m256i r1 = _mm256_subs_epi8(_mm256_loadu_si256((__m256i *) pX),
_mm256_loadu_si256((__m256i *) pY));
r = _mm256_add_ps(r, _mm256_mul_epi8(r1, r1));
pX += 32;
pY += 32;
size -= 32;
}
// Tail: consumes 4 elements per iteration (the helper keeps only the
// first 4 products) but LOADS 16 bytes each time.
// NOTE(review): size is unsigned, so if it is not a multiple of 4 the
// `size -= 4` wraps around and the loop over-runs; and the 16-byte
// loads may read past the end of the arrays -- confirm callers pad
// and size vectors to a multiple of 4 (likely 16/32).
while (size > 0) {
__m128i r2 = _mm_subs_epi8(_mm_loadu_si128((__m128i *) pX),
_mm_loadu_si128((__m128i *) pY));
r = _mm256_add_ps(r, _mm256_mul32_pi8(r2, r2));
pX += 4;
pY += 4;
size -= 4;
}
// Horizontal sum; .m256_f32 member access is MSVC-specific.
r = _mm256_hadd_ps(_mm256_hadd_ps(r, r), r);
return r.m256_f32[0] + r.m256_f32[4];
#else
// Scalar path: widen to int16 before subtracting so the difference
// cannot overflow int8. The pragma asserts 8-byte alignment of a/b.
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) size; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
#endif
#else
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) size; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
#endif
}
};
// Scalar squared-L2 distance for uint8 vectors. Elements are widened to
// int16 before subtracting so the difference cannot wrap. The omp simd
// pragma asserts 8-byte alignment of a and b -- callers must guarantee it.
class DistanceL2UInt8 : public Distance<uint8_t> {
public:
float compare(const uint8_t *a, const uint8_t *b, unsigned size) const {
uint32_t result = 0;
#ifndef _WINDOWS
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
#endif
for (_s32 i = 0; i < (_s32) size; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
}
};
// Squared-L2 distance for float vectors. The AVX2 path requires size to be
// a multiple of 8 and, on non-Windows builds, 32-byte-aligned inputs (the
// __builtin_assume_aligned below and _mm256_load_ps both demand it).
class DistanceL2 : public Distance<float> {
public:
#ifndef _WINDOWS
float compare(const float *a, const float *b, unsigned size) const
__attribute__((hot)) {
a = (const float *) __builtin_assume_aligned(a, 32);
b = (const float *) __builtin_assume_aligned(b, 32);
#else
float compare(const float *a, const float *b, unsigned size) const {
#endif
float result = 0;
#ifdef USE_AVX2
// assume size is divisible by 8
_u16 niters = size / 8;
__m256 sum = _mm256_setzero_ps();
for (_u16 j = 0; j < niters; j++) {
// scope is a[8j:8j+7], b[8j:8j+7]
// load a_vec
// Prefetch the next iteration's cache lines while computing this one.
if (j < (niters - 1)) {
_mm_prefetch((char *) (a + 8 * (j + 1)), _MM_HINT_T0);
_mm_prefetch((char *) (b + 8 * (j + 1)), _MM_HINT_T0);
}
__m256 a_vec = _mm256_load_ps(a + 8 * j);
// load b_vec
__m256 b_vec = _mm256_load_ps(b + 8 * j);
// a_vec - b_vec
__m256 tmp_vec = _mm256_sub_ps(a_vec, b_vec);
/*
// (a_vec - b_vec)**2
__m256 tmp_vec2 = _mm256_mul_ps(tmp_vec, tmp_vec);
// accumulate sum
sum = _mm256_add_ps(sum, tmp_vec2);
*/
// sum = (tmp_vec**2) + sum
sum = _mm256_fmadd_ps(tmp_vec, tmp_vec, sum);
}
// horizontal add sum
result = _mm256_reduce_add_ps(sum);
#else
#ifndef _WINDOWS
#pragma omp simd reduction(+ : result) aligned(a, b : 32)
#endif
for (_s32 i = 0; i < (_s32) size; i++) {
result += (a[i] - b[i]) * (a[i] - b[i]);
}
#endif
return result;
}
};
// Slow implementations of the distance functions for machines without AVX2
// Portable scalar squared-L2 distance for integer element types; used on
// machines without AVX2. Elements are widened to int16 before subtracting
// so the difference cannot overflow the element type.
template<typename T>
class SlowDistanceL2Int : public Distance<T> {
  virtual float compare(const T *a, const T *b, unsigned length) const {
    uint32_t acc = 0;
    for (unsigned i = 0; i < length; ++i) {
      const int32_t d = (int32_t)((int16_t) a[i] - (int16_t) b[i]);
      acc += (uint32_t)(d * d);
    }
    return (float) acc;
  }
};
// Portable scalar squared-L2 distance for float vectors (no SIMD).
class SlowDistanceL2Float : public Distance<float> {
  virtual float compare(const float *a, const float *b,
                        unsigned length) const {
    float acc = 0.0f;
    const float *stop = a + length;
    for (; a != stop; ++a, ++b) {
      const float d = *a - *b;
      acc += d * d;
    }
    return acc;
  }
};
// Squared-L2 distance for int8 vectors. The function's braces are split
// across the preprocessor branches: the non-Windows scalar body closes the
// function inside the #ifndef, and the Windows SSE body (with its own
// closing brace) lives in the #else -- exactly one body is compiled.
class AVXDistanceL2Int8 : public Distance<int8_t> {
public:
virtual float compare(const int8_t *a, const int8_t *b,
unsigned int length) const {
#ifndef _WINDOWS
int32_t result = 0;
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) length; i++) {
result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
((int32_t)((int16_t) a[i] - (int16_t) b[i]));
}
return (float) result;
}
#else
// Windows SSE path. _mm_load_si128 requires 16-byte-aligned pointers and
// _mm_subs_epi8 saturates, clamping differences to [-128, 127].
__m128 r = _mm_setzero_ps();
__m128i r1;
while (length >= 16) {
r1 = _mm_subs_epi8(_mm_load_si128((__m128i *) a),
_mm_load_si128((__m128i *) b));
r = _mm_add_ps(r, _mm_mul_epi8(r1));
a += 16;
b += 16;
length -= 16;
}
// .m128_f32 member access is MSVC-specific.
r = _mm_hadd_ps(_mm_hadd_ps(r, r), r);
float res = r.m128_f32[0];
// Tail of 8: re-load the 16 bytes ending at the current position
// (pointers rewound by 8) and square only the high half.
// NOTE(review): these loads cover bytes beyond the logical tail; confirm
// callers allocate aligned, padded buffers.
if (length >= 8) {
__m128 r2 = _mm_setzero_ps();
__m128i r3 = _mm_subs_epi8(_mm_load_si128((__m128i *) (a - 8)),
_mm_load_si128((__m128i *) (b - 8)));
r2 = _mm_add_ps(r2, _mm_mulhi_epi8(r3));
a += 8;
b += 8;
length -= 8;
r2 = _mm_hadd_ps(_mm_hadd_ps(r2, r2), r2);
res += r2.m128_f32[0];
}
// Tail of 4: same rewind trick with a 32-bit shift variant.
if (length >= 4) {
__m128 r2 = _mm_setzero_ps();
__m128i r3 = _mm_subs_epi8(_mm_load_si128((__m128i *) (a - 12)),
_mm_load_si128((__m128i *) (b - 12)));
r2 = _mm_add_ps(r2, _mm_mulhi_epi8_shift32(r3));
res += r2.m128_f32[0] + r2.m128_f32[1];
}
return res;
}
#endif
};
// Squared-L2 distance for float vectors. As in AVXDistanceL2Int8, the
// function body is split across preprocessor branches (one closing brace
// per branch).
class AVXDistanceL2Float : public Distance<float> {
public:
virtual float compare(const float *a, const float *b,
unsigned int length) const {
#ifndef _WINDOWS
float result = 0;
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
for (_s32 i = 0; i < (_s32) length; i++) {
result += (a[i] - b[i]) * (a[i] - b[i]);
}
return result;
}
#else
// Windows SSE path: 4 floats per iteration, unaligned loads.
// Note: any trailing length % 4 elements are NOT accumulated here --
// callers must use lengths that are multiples of 4 for exact results.
__m128 diff, v1, v2;
__m128 sum = _mm_set1_ps(0);
while (length >= 4) {
v1 = _mm_loadu_ps(a);
a += 4;
v2 = _mm_loadu_ps(b);
b += 4;
diff = _mm_sub_ps(v1, v2);
sum = _mm_add_ps(sum, _mm_mul_ps(diff, diff));
length -= 4;
}
return sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2] +
sum.m128_f32[3];
}
#endif
};
// Inner-product "distance": compare() returns the NEGATED dot product so
// smaller still means closer.
// NOTE(review): the macro guards look inverted/misnamed -- `#ifdef __SSE2__`
// selects the 256-bit AVX intrinsics, while the `#ifdef __AVX__` branch
// uses `_mm128_*` names that do not exist in any intrinsics header (the
// real prefix is `_mm_`), so that branch cannot compile if ever enabled.
// Confirm which branch is actually intended for which ISA.
template<typename T>
class DistanceInnerProduct : public Distance<T> {
public:
float inner_product(const T *a, const T *b, unsigned size) const {
float result = 0;
#ifdef __GNUC__
#ifdef __SSE2__
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
tmp1 = _mm256_loadu_ps(addr1); \
tmp2 = _mm256_loadu_ps(addr2); \
tmp1 = _mm256_mul_ps(tmp1, tmp2); \
dest = _mm256_add_ps(dest, tmp1);
__m256 sum;
__m256 l0, l1;
__m256 r0, r1;
// D: size rounded up to a multiple of 8; DD: the part handled by the
// 16-wide main loop; DR: the 8-element remainder handled up front.
// NOTE(review): when size is not a multiple of 8 these loads read past
// the logical end of the vectors -- confirm buffers are padded.
unsigned D = (size + 7) & ~7U;
unsigned DR = D % 16;
unsigned DD = D - DR;
const float *l = (float *) a;
const float *r = (float *) b;
const float *e_l = l + DD;
const float *e_r = r + DD;
float unpack[8] __attribute__((aligned(32))) = {0, 0, 0, 0, 0, 0, 0, 0};
sum = _mm256_loadu_ps(unpack);
if (DR) {
AVX_DOT(e_l, e_r, sum, l0, r0);
}
for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
AVX_DOT(l, r, sum, l0, r0);
AVX_DOT(l + 8, r + 8, sum, l1, r1);
}
_mm256_storeu_ps(unpack, sum);
result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] +
unpack[5] + unpack[6] + unpack[7];
#else
#ifdef __AVX__
#define SSE_DOT(addr1, addr2, dest, tmp1, tmp2) \
tmp1 = _mm128_loadu_ps(addr1); \
tmp2 = _mm128_loadu_ps(addr2); \
tmp1 = _mm128_mul_ps(tmp1, tmp2); \
dest = _mm128_add_ps(dest, tmp1);
__m128 sum;
__m128 l0, l1, l2, l3;
__m128 r0, r1, r2, r3;
unsigned D = (size + 3) & ~3U;
unsigned DR = D % 16;
unsigned DD = D - DR;
const float *l = a;
const float *r = b;
const float *e_l = l + DD;
const float *e_r = r + DD;
float unpack[4] __attribute__((aligned(16))) = {0, 0, 0, 0};
sum = _mm_load_ps(unpack);
// Intentional fallthrough: each case handles one 4-float chunk of the
// remainder, Duff's-device style.
switch (DR) {
case 12:
SSE_DOT(e_l + 8, e_r + 8, sum, l2, r2);
case 8:
SSE_DOT(e_l + 4, e_r + 4, sum, l1, r1);
case 4:
SSE_DOT(e_l, e_r, sum, l0, r0);
default:
break;
}
for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
SSE_DOT(l, r, sum, l0, r0);
SSE_DOT(l + 4, r + 4, sum, l1, r1);
SSE_DOT(l + 8, r + 8, sum, l2, r2);
SSE_DOT(l + 12, r + 12, sum, l3, r3);
}
_mm_storeu_ps(unpack, sum);
result += unpack[0] + unpack[1] + unpack[2] + unpack[3];
#else
// Pure scalar fallback, unrolled by 4.
float dot0, dot1, dot2, dot3;
const float *last = a + size;
const float *unroll_group = last - 3;
/* Process 4 items with each loop for efficiency. */
while (a < unroll_group) {
dot0 = a[0] * b[0];
dot1 = a[1] * b[1];
dot2 = a[2] * b[2];
dot3 = a[3] * b[3];
result += dot0 + dot1 + dot2 + dot3;
a += 4;
b += 4;
}
/* Process last 0-3 pixels. Not needed for standard vector lengths. */
while (a < last) {
result += *a++ * *b++;
}
#endif
#endif
#endif
return result;
}
float compare(const T *a, const T *b, unsigned size)
const { // since we use normally minimization objective for distance
// comparisons, we are returning 1/x.
float result = inner_product(a, b, size);
// if (result < 0)
// return std::numeric_limits<float>::max();
// else
return -result;
}
};
// "Fast L2": ranks by ||a||^2 - 2<a,b> given a precomputed norm; the
// query's own norm is omitted, presumably because it is constant across all
// comparisons and does not affect the ordering -- confirm callers rely on
// this only for ranking, not for true distances. The ISA-guard caveats
// noted on DistanceInnerProduct apply to norm() as well.
template<typename T>
class DistanceFastL2
: public DistanceInnerProduct<T> { // currently defined only for float.
// templated for future use.
public:
// Squared L2 norm of a, using the same SIMD structure as inner_product.
float norm(const T *a, unsigned size) const {
float result = 0;
#ifdef __GNUC__
#ifdef __SSE2__
#define AVX_L2NORM(addr, dest, tmp) \
tmp = _mm256_loadu_ps(addr); \
tmp = _mm256_mul_ps(tmp, tmp); \
dest = _mm256_add_ps(dest, tmp);
__m256 sum;
__m256 l0, l1;
unsigned D = (size + 7) & ~7U;
unsigned DR = D % 16;
unsigned DD = D - DR;
const float *l = (float *) a;
const float *e_l = l + DD;
float unpack[8] __attribute__((aligned(32))) = {0, 0, 0, 0, 0, 0, 0, 0};
sum = _mm256_loadu_ps(unpack);
if (DR) {
AVX_L2NORM(e_l, sum, l0);
}
for (unsigned i = 0; i < DD; i += 16, l += 16) {
AVX_L2NORM(l, sum, l0);
AVX_L2NORM(l + 8, sum, l1);
}
_mm256_storeu_ps(unpack, sum);
result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] +
unpack[5] + unpack[6] + unpack[7];
#else
#ifdef __AVX__
#define SSE_L2NORM(addr, dest, tmp) \
tmp = _mm128_loadu_ps(addr); \
tmp = _mm128_mul_ps(tmp, tmp); \
dest = _mm128_add_ps(dest, tmp);
__m128 sum;
__m128 l0, l1, l2, l3;
unsigned D = (size + 3) & ~3U;
unsigned DR = D % 16;
unsigned DD = D - DR;
const float *l = a;
const float *e_l = l + DD;
float unpack[4] __attribute__((aligned(16))) = {0, 0, 0, 0};
sum = _mm_load_ps(unpack);
// Intentional fallthrough over the 4-float remainder chunks.
switch (DR) {
case 12:
SSE_L2NORM(e_l + 8, sum, l2);
case 8:
SSE_L2NORM(e_l + 4, sum, l1);
case 4:
SSE_L2NORM(e_l, sum, l0);
default:
break;
}
for (unsigned i = 0; i < DD; i += 16, l += 16) {
SSE_L2NORM(l, sum, l0);
SSE_L2NORM(l + 4, sum, l1);
SSE_L2NORM(l + 8, sum, l2);
SSE_L2NORM(l + 12, sum, l3);
}
_mm_storeu_ps(unpack, sum);
result += unpack[0] + unpack[1] + unpack[2] + unpack[3];
#else
// Pure scalar fallback, unrolled by 4.
float dot0, dot1, dot2, dot3;
const float *last = a + size;
const float *unroll_group = last - 3;
/* Process 4 items with each loop for efficiency. */
while (a < unroll_group) {
dot0 = a[0] * a[0];
dot1 = a[1] * a[1];
dot2 = a[2] * a[2];
dot3 = a[3] * a[3];
result += dot0 + dot1 + dot2 + dot3;
a += 4;
}
/* Process last 0-3 pixels. Not needed for standard vector lengths. */
while (a < last) {
result += (*a) * (*a);
a++;
}
#endif
#endif
#endif
return result;
}
using DistanceInnerProduct<T>::compare;
// Ranking score: norm(a) - 2 * <a, b>, with norm precomputed by the caller.
float compare(const T *a, const T *b, float norm,
unsigned size) const { // not implement
float result = -2 * DistanceInnerProduct<T>::inner_product(a, b, size);
result += norm;
return result;
}
};
} // namespace diskann
|
GB_unaryop__minv_uint32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_int32
// op(A') function: GB_tran__minv_uint32_int32
// C type: uint32_t
// A type: int32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = GB_IMINV_UNSIGNED ((uint32_t) Ax [p], 32) for
// the anz entries, parallelized statically over nthreads. Returns
// GrB_NO_VALUE when this operator/type combination is compiled out via
// GB_DISABLE. (This file is auto-generated; see the header comment.)
GrB_Info GB_unop__minv_uint32_int32
(
uint32_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): typecast int32 -> uint32, apply MINV, and transpose,
// by instantiating the shared GB_unaryop_transpose.c template with this
// file's GB_* macros (phase 2 of 2). Returns GrB_NO_VALUE when compiled
// out via GB_DISABLE. (Auto-generated; see the header comment.)
GrB_Info GB_tran__minv_uint32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.